\documentclass[12pt,a4paper]{article} \usepackage{microtype} \usepackage{amsfonts} \usepackage{amsthm} \usepackage{graphicx} \newcommand{\dpar}[1]{\left(#1\right)} \newcommand{\N}{\mathbb{N}} \newcommand{\R}{\mathbb{R}} \theoremstyle{definition} \newtheorem{ex}{Exercise}[section] \title{Physics 1: Class 4} \author{Max Jauregui} \begin{document} \maketitle \section{Acceleration} Let us consider a particle that moves along a straight line and let us suppose that we know the velocities of the particle at the instants $t_1$ and $t_2$. We define the \emph{average acceleration} of the particle between $t_1$ and $t_2$ as $$\overline{a}=\frac{v(t_2)-v(t_1)}{t_2-t_1}\,.$$ In addition, we define the \emph{instantaneous acceleration} of the particle at an instant $t$ by $$a(t)=\lim_{\Delta t\to 0}\frac{v(t+\Delta t)-v(t)}{\Delta t}=\frac{dv}{dt}\,.$$ Since $v(t)=dx/dt$, it follows from the last equation that $$a(t)=\frac{d}{dt}\dpar{\frac{dx}{dt}}=\frac{d^2x}{dt^2}\,,$$ where the last term is called the second derivative of the function $x$ at the point $t$. Since $a(t)$ is the derivative of the velocity at the instant $t$, in a $v$ vs $t$ graph, $a(t)$ will be given by the slope of the line that is tangent to the curve at the instant $t$. On the other hand, since $a(t)$ is the second derivative of the position at the instant $t$, in an $x$ vs $t$ graph, $a(t)$ will be given by the curvature of the curve at the instant $t$. Basically, a curve has positive curvature at a point if it has the form $\smile$ and negative curvature if it has the form $\frown$. An inflection point of a curve is a point where the curvature changes its sign. The curvature of the curve at this point is zero. \begin{ex} Let $x(t)=5t^3-10t+2$ be the position of a particle in meters, where $t$ is measured in seconds. Find the acceleration of the particle at the instant $1\,\mathrm{s}$. \emph{Answer:} $a(1)=30\,\mathrm{m/s^2}$. \end{ex} \begin{ex} Considering the $x$ vs $t$ graph given in Fig.~\ref{fig:xvst}, answer the following: \begin{enumerate} \item[(i)] What are the signs of the acceleration between the instants $1\,\mathrm{s}$ and $2\,\mathrm{s}$? \item[(ii)] Is the acceleration negative in some instant between $4\,\mathrm{s}$ and $6\,\mathrm{s}$? \item[(iii)] What is the sign of the acceleration when the velocity of the particle attains its maximum value? \item[(iv)] Is there an instant where the acceleration is zero? \end{enumerate} \begin{figure}[ht] \centering \includegraphics[width=0.5\textwidth,keepaspectratio]{figures/xvst.pdf} \caption{$x$ vs $t$ graph.} \label{fig:xvst} \end{figure} \end{ex} \section{Constant acceleration} Let us consider a particle that moves in a straight line with constant acceleration $a_0$, i.e., $a(t)=a_0$ for all $t\ge 0$. We will be interested in obtaining the expression of the position of the particle at an arbitrary instant $t$. We begin by observing that, since $a(t)$ is the derivative of the velocity at the instant $t$, in an analogous way to the case when we obtained the position of a particle from the expression of its velocity, we will have $$v(t)-v(t_0)=\int_{t_0}^{t}a(t')\,dt'=a_0(t-t_0)\,.$$ Then, \begin{equation} \label{eq:1} v(t)=v_0+a_0(t-t_0)\,, \end{equation} where $v_0=v(t_0)$. Now, the displacement of the particle between two instants $t_0$ and $t$ is given by $$x(t)-x(t_0)=\int_{t_0}^{t}v(t')\,dt'=v_0(t-t_0)+\frac{a_0}{2}(t^2-t_0^2)-a_0t_0(t-t_0)\,.$$ Hence, \begin{equation} \label{eq:2} x(t)=x_0+v_0(t-t_0)+\frac{a_0}{2}(t-t_0)^2\,, \end{equation} where $x_0=x(t_0)$. 
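As a quick numerical illustration of Eqs.~(\ref{eq:1}) and~(\ref{eq:2}) (the numbers here are chosen only for illustration and are not part of the original exercises), consider a particle with $t_0=0$, $x_0=0$, $v_0=2\,\mathrm{m/s}$ and $a_0=3\,\mathrm{m/s^2}$. At the instant $t=2\,\mathrm{s}$ we get $$v(2)=2+3\cdot 2=8\,\mathrm{m/s}\,,\qquad x(2)=0+2\cdot 2+\frac{3}{2}\cdot 2^2=10\,\mathrm{m}\,.$$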
From Eqs.~(\ref{eq:1}) and~(\ref{eq:2}) we can obtain other useful equations. For instance, it follows from Eq.~(\ref{eq:1}) that $$t-t_0=\frac{v(t)-v_0}{a_0}\,.$$ Substituting this into Eq.~(\ref{eq:2}), we obtain $$v^2(t)-v_0^2=2a_0[x(t)-x_0]\,.$$ Moreover, using the expression for $a_0$ obtained from Eq.~(\ref{eq:1}), we obtain $$x(t)-x_0=\dpar{\frac{v(t)+v_0}{2}}(t-t_0)\,.$$ \end{document}
{ "alphanum_fraction": 0.6902632886, "avg_line_length": 36.2702702703, "ext": "tex", "hexsha": "c683435777b21b562ee4df099cf2ef8c6fbf5cc8", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "fe1b3ba629ab475d92b6140f49cf397f0b6120fc", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "maxjaure/UEM-DFI", "max_forks_repo_path": "fisica1/aula4.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "fe1b3ba629ab475d92b6140f49cf397f0b6120fc", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "maxjaure/UEM-DFI", "max_issues_repo_path": "fisica1/aula4.tex", "max_line_length": 93, "max_stars_count": 1, "max_stars_repo_head_hexsha": "fe1b3ba629ab475d92b6140f49cf397f0b6120fc", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "maxjaure/UEM-DFI", "max_stars_repo_path": "fisica1/aula4.tex", "max_stars_repo_stars_event_max_datetime": "2019-03-25T18:25:46.000Z", "max_stars_repo_stars_event_min_datetime": "2019-03-25T18:25:46.000Z", "num_tokens": 1408, "size": 4026 }
%% Unfortunately for the contents to contain %% the "Parts" lines successfully, hyperref %% needs to be disabled. \documentclass[nohyper,nobib]{tufte-book} \input{preamble.tex} \addbibresource{references.bib} \title{Principles of Robot\\ Autonomy} \author{Joseph Lorenzetti, Marco Pavone} \date{\today} \setcounter{tocdepth}{1} \renewcommand{\plainauthor}{\leftmark} \begin{document} \maketitle \input{source/intro.tex} \tableofcontents % Organized differently than lecture order! \part{Robot Motion Planning \& Control} \chapter{Mobile Robot Kinematics} \input{source/ch01.tex} \chapter{Open-Loop Motion Planning \& Control} \input{source/ch02.tex} \chapter{Closed-Loop Motion Planning \& Control} \input{source/ch03.tex} \chapter{Optimal Control and Trajectory Optimization} \input{source/ch04.tex} \chapter{Search-Based Motion Planning} \input{source/ch05.tex} \chapter{Sampling-Based Motion Planning} \input{source/ch06.tex} \part{Robot Perception} \chapter{Introduction to Robot Sensors} \input{source/ch07.tex} \chapter{Camera Models and Calibration} \input{source/ch08.tex} \chapter{Stereo Vision and Structure From Motion} \input{source/ch09.tex} \chapter{Image Processing} \input{source/ch10.tex} \chapter{Information Extraction} \input{source/ch11.tex} \chapter{Modern Computer Vision Techniques} \input{source/ch12.tex} \part{Robot Localization} \chapter{Introduction to Localization and Filtering} \input{source/ch13.tex} \chapter{Parametric Filters} \input{source/ch14.tex} \chapter{Nonparametric Filters} \input{source/ch15.tex} \chapter{Robot Localization} \input{source/ch16.tex} \chapter{Simultaneous Localization and Mapping (SLAM)} \input{source/ch17.tex} \chapter{Sensor Fusion} \input{source/ch18.tex} \part{Robot Decision Making} \chapter{Finite State Machines} \input{source/ch19.tex} \chapter{Sequential Decision Making} \input{source/ch20.tex} \chapter{Reinforcement Learning} \input{source/ch21.tex} \chapter{Imitation Learning} \input{source/ch22.tex} \part{Robot Software} \chapter{Robot System Architectures} \input{source/ch23.tex} \chapter{The Robot Operating System} \input{source/ch24.tex} \part{Advanced Topics in Robotics} \chapter{Formal Methods} \input{source/ch25.tex} \chapter{Robotic Manipulation} \input{source/ch26.tex} \appendix \part{Appendices} \chapter{Machine Learning} \input{source/app01.tex} \printbibliography \end{document}
{ "alphanum_fraction": 0.7871183605, "avg_line_length": 24.90625, "ext": "tex", "hexsha": "638ce15868b5b40c8147f8889ea79e02b83d3440", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "852ce0fd1361d95576f72558d2c29d8610ced652", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "StanfordASL/Principles-of-Robot-Autonomy", "max_forks_repo_path": "tex/combined.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "852ce0fd1361d95576f72558d2c29d8610ced652", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "StanfordASL/Principles-of-Robot-Autonomy", "max_issues_repo_path": "tex/combined.tex", "max_line_length": 54, "max_stars_count": 5, "max_stars_repo_head_hexsha": "852ce0fd1361d95576f72558d2c29d8610ced652", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "StanfordASL/Principles-of-Robot-Autonomy", "max_stars_repo_path": "tex/combined.tex", "max_stars_repo_stars_event_max_datetime": "2021-11-10T14:15:38.000Z", "max_stars_repo_stars_event_min_datetime": "2021-03-23T16:03:45.000Z", "num_tokens": 638, "size": 2391 }
\label{sec:data-fitting} We can fit a triangular mesh, $\M$, to a set of data points, $\{\d_i; i=0 \ldots n-1\}$ by minimizing the sum of the squared $l_2$ distances from the points to the mesh: \begin{equation} f(\M) = \sum_{i=0}^{n-1} \| \d_i - \Pr_\M (\d_i) \|^2 , \end{equation} where $\Pr_\M (\d_i)$ is the point on $\M$ closest to $\d_i$, that is, the {\em projection} of $\d_i$ on $\M$. Note that $f:\Reals^{3m} \mapsto \Reals$, where $m$ is the number of vertices in $\M$. We compute $\Pr_\M (\d)$ by minimizing $\| \d - \Pr_\s (\d) \|^2$ over all simplices $\s \in \M$. We need only consider the faces of $\M$, those edges not in any face, and the vertices not in any edge, because the closest point on a face must be at least as close as the closest point on any of its edges, and similarly for vertices. More generally, spatial binning of the simplices and data can greatly reduce the number of simplices that need to be examined. The following sections consider the projection of a single data point $\d$ on a mesh $\M$, and the gradient of the squared distance, as a function of the vertex positions $\p(\v)$. Unfortunately, derivatives of the distance function are not continuous. Second derivative discontinuities occur when a data point is on the boundary of a `watershed' region, the set of points projecting on a vertex or the interior of an edge or face. Gradient discontinuities are encountered when a data point is equidistant from two distinct closest mesh points. \subsection{Distance to vertex} \label{sec:Distance-to-vertex} Let $\p = \p(\v)$ be the position of a particular vertex $\v$, and $\d$ the 3D data point. It follows from equation \ref{eq:l2-gradient} that \begin{equation} \label{eq:vertex-distance-gradient} \Gc{\p}{\| \p - \d \|^2}{\q} = 2 ( \q - \d ). \end{equation} The distance to the nearest vertex in a set of vertices $\V$ is: $\min_{\v \in \V} \| \p(\v) - \d \|^2$. If $\v^{\mathrm min}$ is the minimizing vertex, and $\p^{\mathrm min}$ its position, then the partial gradient with respect to the position of any other vertex is zero, and the partial gradient with respect to $\p^{\mathrm min}$ is given in equation \ref{eq:vertex-distance-gradient}. Note that the gradient is only defined and continuous while $\d$ is within the interior of the Voronoi regions surrounding the vertices. \subsection{Distance to edge} \label{sec:Distance-to-edge} Let the edge $\e$ have end points $\p = (\p_0, \p_1) \in \Reals^6$. 
We can write the projection of a data point $\d$ on $\e$ as: \begin{equation} \Pr_\p (\d) = b_0(\p) \p_0 + b_1(\p) \p_1 \end{equation} where \begin{eqnarray} b_0(\p) & = & \max\left(0,\min\left(1, {{ (\d - \p_1) \bullet (\p_0 - \p_1) } \over { \| \p_0 - \p_1 \|^2 } }\right) \right) \\ b_1(\p) & = & 1 - b_0(\p) \nonumber \end{eqnarray} Differentiating the squared distance with respect to $\p_0$ gives: \begin{eqnarray} \label{eq:edge-distance-gradient-derivation} \De{\p_0}{ \| \Pr_{\p} (\d) - \d \|^2 }{\q} & = & 2 \left( \Pr_{\q} (\d) - \d \right)^\dagger \De{\p_0}{\Pr_{\p} (\d) }{\q} \\ & = & 2 \left( \Pr_{\q} (\d) - \d \right)^\dagger \De{\p_0}{\left[ b_0(\p)\p_0 + b_1(\p)\p_1 \right]}{\q} \nonumber \\ & = & 2 \left( \Pr_{\q} (\d) - \d \right)^\dagger \De{\p_0}{\left[ b_0(\p)\p_0 + (1 - b_0(\p))\p_1 \right]}{\q} \nonumber \\ & = & 2 \left( \Pr_{\q} (\d) - \d \right)^\dagger \De{\p_0}{\left[ b_0(\p)(\p_0 - \p_1) \right]}{\q} \nonumber \\ & = & 2 \left( \Pr_{\q} (\d) - \d \right)^\dagger \left[ b_0(\q) \I + (\q_0 - \q_1) \otimes \Gc{\p_0}{b_0(\p)}{\q} \right] \nonumber \end{eqnarray} Because $\left( \Pr_{\q} (\d) - \d \right)$ is orthogonal to $\left( \q_0 - \q_1 \right)$, we get: \begin{eqnarray} \label{eq:edge-distance-gradient} \Gc{\p_0}{ \| \Pr_{\p} (\d) - \d \|^2 }{\q} & = & 2 b_0(\q) \left[ \Pr_{\q} (\d) - \d \right] \\ \Gc{\p_1}{ \| \Pr_{\p} (\d) - \d \|^2 }{\q} & = & 2 b_1(\q) \left[ \Pr_\q (\d) - \d \right] \nonumber \end{eqnarray} As in the vertex case, the distance to the nearest edge in a set of edges $\E$ is: \begin{equation} \| \Pr_{\E} (\d) - \d \|^2 = \min_{\e \in \E} \| \Pr_{\p(\e)}(\d) - \d \|^2 \end{equation} If $\e^{\min}$ is the minimizing edge, $\v_0^{\min}$ and $\v_1^{\min}$ its vertices, and $\p_0^{\min}$ and $\p_1^{\min}$ the corresponding endpoints, then the partial gradient with respect to the position of any other vertex is zero, and the partial gradient with respect to $\p_0^{\min}$ and $\p_1^{\min}$ is given in equation \ref{eq:edge-distance-gradient}. The total gradient is defined and continuous when $\d$ is within the union of the watershed regions of $\e^{\min}$ and its vertices. It is also continuous where the watershed of one of the vertices meets the watershed of any of the edges containing that vertex. It is not continuous if $\d$ lies on the boundary of the watershed of $\e^{\min}$ and the watershed of an edge with which it does not share a vertex. \subsection{Distance to face} \label{sec:Distance-to-face} Let the face $\f$ have corner points $\p = (\p_0, \p_1, \p_2) \in \Reals^9$. As in the edge case, we can write the projection of a data point $\d$ on $\f$ in terms of the barycentric coordinates as: \begin{equation} \Pr_\p (\d) = b_0(\p) \p_0 + b_1(\p) \p_1 + b_2(\p) \p_2, \end{equation} and, by an argument similar to that used in equation \ref{eq:edge-distance-gradient-derivation}, we can show that \begin{eqnarray} \label{eq:face-distance-gradient} \Gc{\p_0}{ \| \Pr_{\p} (\d) - \d \|^2 }{\q} & = & 2 b_0(\q) \left[ \Pr_{\q} (\d) - \d \right] \\ \Gc{\p_1}{ \| \Pr_{\p} (\d) - \d \|^2 }{\q} & = & 2 b_1(\q) \left[ \Pr_\q (\d) - \d \right] \nonumber \\ \Gc{\p_2}{ \| \Pr_{\p} (\d) - \d \|^2 }{\q} & = & 2 b_2(\q) \left[ \Pr_\q (\d) - \d \right] \nonumber \end{eqnarray} Computing the barycentric coordinates for the projection on a face (triangle) is slightly more complicated than for an edge (line segment). First center the problem by letting $\v = \d - \p_0$, $\v_1 = \p_1 - \p_0$, and $\v_2 = \p_2 - \p_0$. 
Then compute the raw, unbounded barycentric coordinates of the projection of $\d$ onto the plane spanned by the triangle: \begin{eqnarray} r_0(\p) & = & 1 - r_1(\p) - r_2(\p) \\ r_1(\p) & = & \v \bullet {{\v_1 \perp \v_2} \over {\| \v_1 \perp \v_2 \|^2} } \nonumber \\ r_2(\p) & = & \v \bullet {{\v_2 \perp \v_1} \over {\| \v_2 \perp \v_1 \|^2} } \nonumber \end{eqnarray} To correctly bound the raw coordinates to numbers between 0 and 1, we need to determine whether the projected point is in the interior of the triangle, on one of the edges, or on one of the vertices. \begin{description} \item[Vertex case:] If two of the $r_i$ are negative, then $\d$ projects on the remaining vertex. Set the two $b_i$ corresponding to the negative $r_i$ to 0 and the remaining $b_i$ to 1. \item[Edge case:] If exactly one of the $r_i$ is negative, then $\d$ projects on the opposite edge. Set the $b_i$ corresponding to the negative $r_i$ to 0. Go to \autoref{sec:Distance-to-edge} to see how to compute the remaining barycentric coordinates by projecting on the edge. \item[Interior case:] If none of the $r_i$ is negative, then $\d$ projects on the interior and each $b_i = r_i$. \end{description}
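The case analysis above is easy to mechanize. The following C sketch is added here only for illustration: it is not part of the original text, the helper names are invented, and it assumes that $\v_1 \perp \v_2$ denotes the component of $\v_1$ orthogonal to $\v_2$. It computes the raw coordinates $r_i$ for a data point and reports how many of them are negative, which selects among the three cases.

{\small
\begin{verbatim}
/* Illustrative sketch only.  Computes the raw barycentric coordinates of the
 * projection of a data point d onto the plane of triangle (p0,p1,p2) and
 * returns the number of negative coordinates:
 * 0 = interior case, 1 = edge case, 2 = vertex case. */
#include <stdio.h>

typedef struct { double x, y, z; } vec3;

static double dot(vec3 a, vec3 b) { return a.x*b.x + a.y*b.y + a.z*b.z; }
static vec3 sub(vec3 a, vec3 b) { vec3 r; r.x=a.x-b.x; r.y=a.y-b.y; r.z=a.z-b.z; return r; }
static vec3 scl(vec3 a, double s) { vec3 r; r.x=a.x*s; r.y=a.y*s; r.z=a.z*s; return r; }
/* perp(a,b): component of a orthogonal to b. */
static vec3 perp(vec3 a, vec3 b) { return sub(a, scl(b, dot(a,b)/dot(b,b))); }

static int raw_barycentric(vec3 d, vec3 p0, vec3 p1, vec3 p2, double r[3])
{
    vec3 v  = sub(d,  p0);
    vec3 v1 = sub(p1, p0);
    vec3 v2 = sub(p2, p0);
    vec3 q1 = perp(v1, v2);   /* v1 "perp" v2 */
    vec3 q2 = perp(v2, v1);   /* v2 "perp" v1 */

    r[1] = dot(v, q1) / dot(q1, q1);
    r[2] = dot(v, q2) / dot(q2, q2);
    r[0] = 1.0 - r[1] - r[2];
    return (r[0] < 0) + (r[1] < 0) + (r[2] < 0);
}

int main(void)
{
    vec3 p0 = {0,0,0}, p1 = {1,0,0}, p2 = {0,1,0}, d = {0.25, 0.25, 2.0};
    double r[3];
    int ncase = raw_barycentric(d, p0, p1, p2, r);
    printf("negative coordinates: %d, r = (%g, %g, %g)\n", ncase, r[0], r[1], r[2]);
    return 0;
}
\end{verbatim}
}

In the vertex and edge cases the raw coordinates would then be clamped as described in the list above; only the interior case uses them directly.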
{ "alphanum_fraction": 0.6531595353, "avg_line_length": 32.8279069767, "ext": "tex", "hexsha": "23014f8df43d747c1d842f3291b21e65c6267d3f", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "970bcbf5e31e40017b2333039e1505c7ea2f56dd", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "palisades-lakes/les-elemens", "max_forks_repo_path": "doc/old/fotm/data-fitting.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "970bcbf5e31e40017b2333039e1505c7ea2f56dd", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "palisades-lakes/les-elemens", "max_issues_repo_path": "doc/old/fotm/data-fitting.tex", "max_line_length": 87, "max_stars_count": null, "max_stars_repo_head_hexsha": "970bcbf5e31e40017b2333039e1505c7ea2f56dd", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "palisades-lakes/les-elemens", "max_stars_repo_path": "doc/old/fotm/data-fitting.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2579, "size": 7058 }
\section{Exercise solutions: control flow} % (fold) \label{sec:exercise_solutions_control_flow} \begin{frame}\frametitle{For loops} \framesubtitle{Problem} Print the numbers 1 to 100 that are divisible by 5 but not by 3. \end{frame} \begin{frame}\frametitle{For loops} \framesubtitle{Solution} \codeblock{code/exsol_control_for.py} \end{frame} \begin{frame}\frametitle{While loops} \framesubtitle{Problem} The smallest number that is divisible by 2, 3 and 4 is 12. Find the smallest number that is divisible by all integers between 1 and 10. \end{frame} \begin{frame}\frametitle{While loops} \framesubtitle{Solution} \codeblock{code/exsol_control_while.py} \end{frame} \begin{frame}\frametitle{Collatz sequence} \framesubtitle{Problem} A Collatz sequence is formed as follows: We start with some number $x_0$, and we find the next number in the sequence by \[ x_{i+1} = \begin{cases} x_i / 2 & \text{ if $x_i$ is even}\\ 3x_i + 1 & \text{ if $x_i$ is odd} \end{cases} \] If $x_i = 1$, we stop iterating and have found the full sequence. It is conjectured, though not proven, that every sequence eventually ends at $1$. Print the Collatz sequence starting at $x_0 = 103$. \end{frame} \begin{frame}\frametitle{Collatz sequence} \framesubtitle{Solution} \codeblock{code/exsol_control_collatz.py} \end{frame} % section exercise_solutions_control_flow (end)
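% The solution slides above pull their code from external files
% (code/exsol_control_*.py), which are not reproduced in this source.
% The extra frame below is an illustrative sketch only -- it is not the
% course's own solution file -- showing one way to print the Collatz
% sequence starting at x0 = 103.
\begin{frame}[fragile]\frametitle{Collatz sequence}
\framesubtitle{Illustrative sketch (not the course solution file)}
\begin{verbatim}
x = 103
while x != 1:
    print(x)
    x = x // 2 if x % 2 == 0 else 3 * x + 1
print(x)  # the sequence ends at 1
\end{verbatim}
\end{frame}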
{ "alphanum_fraction": 0.6960322798, "avg_line_length": 24.3770491803, "ext": "tex", "hexsha": "582f1a723d1a4f3f461d92d1e3bd33102700ebf9", "lang": "TeX", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2019-05-13T07:36:06.000Z", "max_forks_repo_forks_event_min_datetime": "2019-04-24T03:31:02.000Z", "max_forks_repo_head_hexsha": "84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "naskoch/python_course", "max_forks_repo_path": "lectures/tex/exsol_control.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "naskoch/python_course", "max_issues_repo_path": "lectures/tex/exsol_control.tex", "max_line_length": 83, "max_stars_count": 4, "max_stars_repo_head_hexsha": "84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "naskoch/python_course", "max_stars_repo_path": "lectures/tex/exsol_control.tex", "max_stars_repo_stars_event_max_datetime": "2020-04-18T21:09:03.000Z", "max_stars_repo_stars_event_min_datetime": "2015-08-10T17:46:55.000Z", "num_tokens": 434, "size": 1487 }
\documentclass{beamer} %\setbeamertemplate{note page}[plain] %\setbeameroption{show notes} \usepackage{physics} \usepackage{amsmath} \usepackage{tikz} \usetikzlibrary{tikzmark} \setbeamertemplate{caption}[numbered] \newcommand{\nl}{\\ \vspace{1em}} \mode<presentation> { % The Beamer class comes with a number of default slide themes % which change the colors and layouts of slides. Below this is a list % of all the themes, uncomment each in turn to see what they look like. %\usetheme{default} %\usetheme{AnnArbor} %\usetheme{Antibes} %\usetheme{Bergen} %\usetheme{Berkeley} %\usetheme{Berlin} %\usetheme{Boadilla} %\usetheme{CambridgeUS} %\usetheme{Copenhagen} %\usetheme{Darmstadt} %\usetheme{Dresden} %\usetheme{Frankfurt} %\usetheme{Goettingen} %\usetheme{Hannover} \usetheme{Ilmenau} %\usetheme{JuanLesPins} %\usetheme{Luebeck} %\usetheme{Madrid} %\usetheme{Malmoe} %\usetheme{Marburg} %\usetheme{Montpellier} %\usetheme{PaloAlto} %\usetheme{Pittsburgh} %\usetheme{Rochester} %\usetheme{Singapore} %\usetheme{Szeged} %\usetheme{Warsaw} % As well as themes, the Beamer class has a number of color themes % for any slide theme. Uncomment each of these in turn to see how it % changes the colors of your current slide theme. %\usecolortheme{albatross} %\usecolortheme{beaver} %\usecolortheme{beetle} %\usecolortheme{crane} %\usecolortheme{dolphin} %\usecolortheme{dove} %\usecolortheme{fly} %\usecolortheme{lily} %\usecolortheme{orchid} %\usecolortheme{rose} %\usecolortheme{seagull} %\usecolortheme{seahorse} %\usecolortheme{whale} %\usecolortheme{wolverine} %\setbeamertemplate{footline} % To remove the footer line in all slides uncomment this line %\setbeamertemplate{footline}[page number] % To replace the footer line in all slides with a simple slide count uncomment this line %\setbeamertemplate{navigation symbols}{} % To remove the navigation symbols from the bottom of all slides uncomment this line } \usepackage{graphicx} % Allows including images \usepackage{booktabs} % Allows the use of \toprule, \midrule and \bottomrule in tables %---------------------------------------------------------------------------------------- % TITLE PAGE %---------------------------------------------------------------------------------------- \title[Quantum Love]{Quantum Love: \\ The Interplay of Science with Emotion} % The short title appears at the bottom of every slide, the full title is only on the title page \author{Daniel Prelipcean} % Your name \institute[JUB Stem Slam 2018] % Your institution as it will appear on the bottom of every slide, may be shorthand to save space {Jacobs University Bremen \\ % Your institution for the title page \medskip \textit{[email protected]} % Your email address } \date{\today} % Date, can be changed to a custom date \begin{document} \begin{frame} \titlepage % Print the title page as the first slide \note{Physics is like sex} \end{frame} %---------------------------------------------------------------------------------------- % PRESENTATION SLIDES %---------------------------------------------------------------------------------------- %------------------------------------------------ \section{Introduction} % Sections can be created in order to organize your presentation into discrete blocks, all sections and subsections are automatically printed in the table of contents as an overview of the talk %---------------------------------------------------------------------------------------- \begin{frame} \frametitle{Physics is Fun!} \begin{columns} \column{0.5\textwidth} "Physics is like sex: 
sure, it may give some practical results, but that's not why we do it." \nl \uncover<2->{(Richard Feynman)} \column{0.5\textwidth} \begin{figure} \uncover<2->{\includegraphics[scale=0.35]{Pics/Richard_Feynman_Nobel.jpg} \caption{Nobel Prize Picture \footnotemark}} \end{figure} \end{columns} \only<2->{\footnotetext[1]{wikipedia}} \end{frame} %------------------------------------------------ \begin{frame} \frametitle{Motivation and Idea} \begin{figure} \includegraphics[width=0.5\textwidth]{Pics/Low-Testosterone-Symptoms.jpg} \caption{Male Testosterone Levels \footnotemark} \label{pic:maletestosteroneleveles} \end{figure} \only<2->{\footnotetext[2]{become-the-alpha-male.com/blog/category/testosterone/}} \note{Here is an interesting graph about male's testosterone levels as a function of time throughout the day. The male organism has a hormonal peak in the morning and then it widely fluctuates. That is why gentlemen are especially horny when they wake up. Coincidentally, the quantum physics lectures at Jacobs are also in the morning, and so you end up with a classroom full of horny physicists, learning about how to get excited states. In one of these lectures, the idea of Quantum Sex appeared, and hence this presentation.} \end{frame} %------------------------------------------------ \begin{frame} \frametitle{Disclaimer} \uncover<2->{ If you feel offended, let me know so that I can skip the respective part of my presentation. \nl If you feel offended, but do not want to speak up, let me know afterwards, so that I can change it and improve in future presentations. } \end{frame} %------------------------------------------------ \section{Wavefunctions} % A subsection can be created just before a set of slides with a common theme to further break down your presentation into chunks \begin{frame} \frametitle{Basics of Quantum Mechanics} The main object in Quantum Mechanics is the wavefunction $\psi$. 
\uncover<2->{ Employing Dirac notation, this is written as: } \begingroup \huge \begin{align*} \onslide<4->{\tikzmark{a}\hat{\mathbb{O}}}\uncover<2->{ \ket{\psi}} & \onslide<5->{=\tikzmark{b} o \ket{\psi}} \end{align*} \begin{equation*} \onslide<8->{\widehat{Feeling}} \onslide<7->{\vcenter{\hbox{\includegraphics[width=0.2\textwidth]{Pics/pic_me.jpg}}}} \onslide<9->{=\text{nervous} \vcenter{\hbox{\includegraphics[width=0.2\textwidth]{Pics/pic_me.jpg}}}} \end{equation*} \uncover<6->{ \begin{tikzpicture}[remember picture,overlay] \draw[<-] ([shift={(10pt,-5pt)}]pic cs:a) |- ([shift={(-10pt,-15pt)}]pic cs:a) node[anchor=east] {$\scriptstyle \text{operator}$}; \draw[->] ([shift={(10pt,-5pt)}]pic cs:b) |- ([shift={(20pt,-15pt)}]pic cs:b) node[anchor=west] {$\scriptstyle \text{eigenvalue}$}; \end{tikzpicture} } \endgroup \note{Fourier transform: position-momentum space with gentlemen-ladies space} \end{frame} %---------------------------------------------------------------------------------------- \begin{frame} \frametitle{States Spaces} Superposition Principle: Male State Space \begin{equation} \ket{m(x)}= \sum_{i=0}^n c_i \ket{m_i(x)} \end{equation} %\begingroup %\huge %\begin{equation*} %\uncover<2->{\vcenter{\hbox{\includegraphics[width=0.15\textwidth]{Pics/pic_me.jpg}}}} \uncover<3->{= c_1 \vcenter{\hbox{\includegraphics[width=0.15\textwidth]{Pics/face.jpg}}}} \uncover<4->{ + c_2 \vcenter{\hbox{\includegraphics[width=0.15\textwidth]{Pics/crop.JPG}}}} \uncover<5->{ + c_3 \vcenter{\hbox{\includegraphics[width=0.15\textwidth]{Pics/profile_pic_Face.png}}}} \uncover<6->{ + \hdots} %\end{equation*} %\endgroup \end{frame} %---------------------------------------------------------------------------------------- \begin{frame} \frametitle{Fourier transform between gentlemen and ladies space} \begin{equation} \uncover<2->{\ket{w(k)} =} \uncover<1->{\sum_j c_j} \uncover<4->{ \frac{1}{(\sqrt{2\pi})^d} \int d^d x} \ \uncover<1->{\ket{m_j(x)}} \uncover<3->{e ^{-i k x}} \uncover<5->{\delta(k_\mu k^\mu + m^2)} \uncover<6->{ \Theta(k_\mu x^\mu) } \end{equation} \uncover<2->{ where:} \begin{enumerate} \uncover<3->{\item the exponential = our first attempt to understand something } \uncover<4->{\item $d$ dimensions = cannot even comprehend in how many ways a woman can think } \uncover<5->{\item the delta function $\delta$ = engage in a conversation only when appropriate } \uncover<6->{\item step function $\Theta(\mu)$ = apologize when mistaken } \end{enumerate} \note{Being male, it is very convenient for me to work in the male space. 
But the female space is a complete unknown for me, and this brings a lot of complications along the way} \end{frame} %---------------------------------------------------------------------------------------- \section{Operators} \begin{frame} \frametitle{The Love operator} \uncover<2->{Let $\hat{L}$ be the Love operator, with the desired property:} \uncover<3->{ \begin{equation} \hat{L} \ket{w} = \text{(loves me)} \ket{w} \end{equation} } \uncover<4->{Problem: Hard to find an exact representation of $\hat{L}$ for different ladies $\ket{w_j}$.} \nl \uncover<5->{\textbf{Daniel's Conjecture:}} \uncover<6->{One can find a suitable representation of $\hat{L}$ only for a single woman $\ket{w} = \ket{g}$, that is, the girlfriend.} \end{frame} %---------------------------------------------------------------------------------------- \begin{frame} \frametitle{Common Approximations (1)} \begin{equation*} \uncover<2->{\hat{L} \ket{w}}\uncover<3->{ \approx \widehat{\text{Flowers}} \ket{w} } \uncover<4->{ = \text{impressed} \ket{w}} \end{equation*} \begingroup \Large \begin{equation*} \uncover<6->{\widehat{\text{Flowers}}} \uncover<5->{\vcenter{\hbox{\includegraphics[width=0.15\textwidth]{Pics/Nicoleta.jpg}}}} \uncover<7->{= \vcenter{\hbox{\includegraphics[width=0.15\textwidth]{Pics/flowers.JPG}}}} \uncover<8->{= \vcenter{\hbox{\includegraphics[width=0.15\textwidth]{Pics/impre.JPG}}}} \end{equation*} \endgroup \end{frame} %---------------------------------------------------------------------------------------- \begin{frame} \frametitle{Common Approximations (2)} \begin{equation*} \uncover<2->{\hat{L} \ket{w}}\uncover<3->{ \approx \widehat{\text{Romantic Dinner}} \ket{w} } \uncover<4->{ = \text{impressed} \ket{w}} \end{equation*} \begingroup \Large \begin{equation*} \uncover<6->{\widehat{\text{Romantic Dinner}}} \uncover<5->{\vcenter{\hbox{\includegraphics[width=0.15\textwidth]{Pics/Nicoleta.jpg}}}} \uncover<7->{= \vcenter{\hbox{\includegraphics[width=0.15\textwidth]{Pics/dinner.jpg}}}} \uncover<8->{= \vcenter{\hbox{\includegraphics[width=0.15\textwidth]{Pics/impre.JPG}}}} \end{equation*} \endgroup \note{What I was particularly interested in is the love operator. Usually, one knows how to apply it to oneself, but not to other people, meaning that it is hard to find the exact representation of this operator for different partners.} \end{frame} %---------------------------------------------------------------------------------------- \begin{frame} \frametitle{The Cheating operator} \uncover<2->{ Operators may change the state completely, e.g. the cheating operator $\hat{C}$. } \nl \uncover<3->{ Let $\ket{g}$ be your girlfriend, and $\ket{w}$ be another girl. Then: \begin{equation} \hat{S} \ket{w} = \hat{C} \ket{g} = \uncover<4->{0 \ket{0} = 0} \end{equation} } \uncover<4->{For eternity.} \nl \uncover<5->{\textbf{Conclusion:} Not all operator equations are eigenvalue problems.} \note{If you have sex with another girl, which qualifies as cheating, and if your girlfriend finds out, which she always does, then you lose your girlfriend. So the state collapses to 0. Moreover, you are not the only one acting on these states, so they may change without notice. 
That is why one has to act rather quickly. } \end{frame} %---------------------------------------------------------------------------------------- \begin{frame} \frametitle{The Sex operator } \uncover<2->{ Let $\hat{S}$ be the sex operator, with the desired property: \begin{equation} \hat{S} \ket{g} = \text{sex} \ket{g} \end{equation} } \uncover<3->{ However, it can be the case that: \begin{equation} \hat{S} \ket{g} =\text{harassment} \ket{g} \end{equation} So be very careful with this one! } \nl \uncover<4->{\textbf{Hopeful Lemma:} As $t \to \infty$, the above case will happen with probability $P= 0$, that is, never again.} \end{frame} %---------------------------------------------------------------------------------------- \begin{frame} \frametitle{The Sex operator } Practically, one may have to apply it multiple times to reach the desired result: \begin{equation} \hat{S}^n \ket{g} = \text{sex} \ket{g} \end{equation} \uncover<2->{Scientists are especially interested in limit cases:} \uncover<3->{ e.g. $n = 0$, i.e. you do not have to do anything and get: \begin{equation} \hat{S}^0 \ket{g} = \ket{g} = \text{sex} \ket{g} \end{equation} } \uncover<4->{\textbf{Open question:} Prove whether such states exist (or not).} \note{As a side note, there are people who are more physical, so they are interested in the sex operator $\hat{S}$, but it may happen that this operator cannot be always applied! So be very careful with this one. As physicists, we are interested in the limiting cases to find inconsistencies or evidence. I still have to prove that such girls exist. } \end{frame} %---------------------------------------------------------------------------------------- \begin{frame} \frametitle{The Love operator (yet again) } \uncover<2->{A more realistic scenario is the following: \begin{equation} \hat{L}^n \ket{g} = \prod_{j=1}^n (L_j) \ket{g} = \text{(in love)} \ket{g} \end{equation} } \uncover<3->{That is, one has to apply it multiple times, possibly using different representations or common approximations (as before).} \nl \uncover<4->{ Most interesting case: $n \to \infty$.} \nl \uncover<5->{ \textbf{Problem:}\\ How to apply an infinite amount of work?} \uncover<6->{($\approx 0 $ in her eyes)} \end{frame} %---------------------------------------------------------------------------------------- \begin{frame} \frametitle{The Love operator solution} \uncover<2->{ Answer: Apply the Interest operator $\hat{I}$: \begin{equation} \hat{I} \ket{g} = \text{(interested in me)}\ket{g} \end{equation} } \uncover<3->{ such that she wants to apply the Love operator on me, that is: \begin{equation} \hat{L}_{w} \ket{b} = \text{(?)} \ket{b} \end{equation} } \end{frame} %---------------------------------------------------------------------------------------- \begin{frame} \frametitle{Conclusion} Quantum Physics in a nutshell: \begin{enumerate} \pause \item Define your wave functions in a suitable basis. \pause \item Investigate how operators apply on the wave functions (and pay close attention to how you can apply them). \pause \item Obtain physical results that can be experimentally achieved. \end{enumerate} \end{frame} %---------------------------------------------------------------------------------------- \section{Conclusion} \begin{frame} \frametitle{Acknowledgements} \begin{columns} \column{0.5\textwidth} \uncover<2->{ \begin{figure} \includegraphics[scale=0.10]{Pics/grad.JPG} \caption{Success! 
$(n \gg 1)$} \end{figure}} \column{0.65\textwidth} I acknowledge \uncover<2->{psychological support from my girlfriend Nicoleta (JUB, Class of 2017)} \uncover<3->{and financial support from my parents, through the Grant RO-B961217. \begin{figure} \includegraphics[scale=0.065]{Pics/fam.jpg} \caption{Prelipcean Family} \end{figure}} \end{columns} \note{For those who knew QM from before, I hope you enjoyed this. For those who did not know, I hope that you know have an idea what QM is all about. To end, I would like to share with all of you the following message: Science is the most beautiful endeavour mankind has ever done. Thank you! } \end{frame} %---------------------------------------------------------------------------------------- \begin{frame} \frametitle{Take Home Message} \uncover<2->{Humans} \uncover<3->{= social beings, who urge to feel love. } \nl \uncover<4->{Tell "I love you" more often, and louder!} \nl \uncover<5->{Even the most aesthetic equation cannot compare to a sincere \\ "I love you" \\ from your loved ones.} \nl \uncover<6->{Thank You and May the Force be with You!} \end{frame} %---------------------------------------------------------------------------------------- \end{document}
{ "alphanum_fraction": 0.6349206349, "avg_line_length": 29.6538461538, "ext": "tex", "hexsha": "923997621e328da926879f23bedf694c447d5906", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "9be486eae32a8d37d6ac972f2d873a386df5e4fb", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "dprelipcean/presentations", "max_forks_repo_path": "180504-stem-slam-jub-quantum-love/main.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "9be486eae32a8d37d6ac972f2d873a386df5e4fb", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "dprelipcean/presentations", "max_issues_repo_path": "180504-stem-slam-jub-quantum-love/main.tex", "max_line_length": 398, "max_stars_count": null, "max_stars_repo_head_hexsha": "9be486eae32a8d37d6ac972f2d873a386df5e4fb", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "dprelipcean/presentations", "max_stars_repo_path": "180504-stem-slam-jub-quantum-love/main.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 4560, "size": 16191 }
\documentclass[a4paper]{article} \usepackage{skak} \def\package#1{``\textsf{#1}''} \def\command#1{``\texttt{\symbol{92}#1}''} \def\commandwarg#1#2{\texttt{\symbol{92}#1}\{\textit{#2}\}} \def\base#1{$_{\scriptscriptstyle #1}$} \def\abbrev#1{\textsf{#1}} \def\bnf#1{\textit{#1}} \def\bnfterm#1{\textbf{#1}} \def\bnfprod{$\rightarrow$} \def\bnfempty{$\epsilon$} \def\bnfor{$|$} \def\bnfjoin{$\bowtie$} \setlength{\parindent}{0pt} \title{The Syntax of Chess Moves in the \LaTeX{} Package \package{skak} (Draft)} \author{Dirk B\"achle} \begin{document} \maketitle \begin{abstract} This short document contains some thoughts and ideas about the ``to be supported'' syntax of chess moves, including SAN (Short Algebraic Notation) as well as LAN (Long Algebraic Notation). The main purpose of this draft is to specify a concrete set of allowed chess moves. In the next step, extensive test routines for this syntax should be generated that can be used to verify the final implementation\ldots \end{abstract} \section*{Stage 1: Parsing \command{mainline}/\command{variation}} The commands \command{mainline} and \command{variation} should accept a nonempty list of space-separated ``move tokens'' (\abbrev{MT}): \medskip \hfil\commandwarg{mainline}{MT\_list}\hfil or \hfil\commandwarg{variation}{MT\_list}\hfil where \begin{center} \begin{minipage}{3cm} \begin{tabbing} \bnf{MT\_list} \bnfprod\={} \bnf{MT} \bnfterm{space} \bnf{MT\_list}\\ \>\bnfor{} \bnf{MT} \end{tabbing} \end{minipage} \end{center} using BNF (Backus-Naur Form) notation with `\bnfterm{space}' as the terminal symbol for the character '\ ' (ASCII code 32\base{10}). \section*{Stage 2: Splitting off move numbers} Each \abbrev{MT} may be either a chess move (\abbrev{CM}), a move number (\abbrev{MN}) or a token like ``\texttt{2.Kg1}'' that combines both: \begin{center} \begin{minipage}{3cm} \begin{tabbing} \bnf{MT} \bnfprod\={} \bnf{MN} \bnf{CM}\\ \>\bnfor{} \bnf{CM}\\ \>\bnfor{} \bnf{MN} \end{tabbing} \end{minipage} \end{center} The only task of this stage is to separate ``combined tokens'' (first rule in the production above) and, therefore, supply the next stage 3 with a steady stream of single move tokens (\abbrev{M}). \begin{quote} Remark: This ``separation'' probably has to be done by inspecting the whole token character by character. Thus, it appears to be efficient to already collect information about the move number \abbrev{MN} (White/Black to move, getting the number itself, suppressing leading zeros) in this early stage\ldots \end{quote} No semantic checking is done here, i.e.~this stage doesn't detect errors like \mbox{``\texttt{2.~Kg1 3.~Bf4}''}. \section*{Stage 3: Parsing single move tokens} At this point, we can be sure to get either a chess move \abbrev{CM} or a move number \abbrev{MN}: \begin{center} \begin{minipage}{3cm} \begin{tabbing} \bnf{M} \bnfprod\={} \bnf{CM}\\ \>\bnfor{} \bnf{MN} \end{tabbing} \end{minipage} \end{center} Both can be distinguished well by inspecting the first character of the token, since only move numbers \abbrev{MN} may start with a digit (\abbrev{D}). This stage would be the right place to check for the correct order of chess moves and move numbers, i.e.~if a \abbrev{MN} is encountered, the right side (White or Black) should be to move. \section*{Parsing move numbers} Move numbers \abbrev{MN} consist of an integer value (\abbrev{N}), immediately followed by one or three dots as terminal symbols, signalling whether it is White's or Black's move. 
\begin{center} \begin{minipage}{3cm} \begin{tabbing} \bnf{MN} \bnfprod\={} \bnf{N} \bnfterm{...}\\ \>\bnfor{} \bnf{N} \bnfterm{.} \end{tabbing} \end{minipage} \end{center} An integer value \abbrev{N} is a nonempty list of single digits (\abbrev{D})\ldots \begin{center} \begin{minipage}{3cm} \begin{tabbing} \bnf{N} \bnfprod\={} \bnf{D} \bnf{N}\\ \>\bnfor{} \bnf{D} \end{tabbing} \end{minipage} \end{center} \ldots where each digit \abbrev{D} should be contained in the set of terminal symbols 0--9. \begin{center} \begin{minipage}{5cm} \begin{tabbing} \bnf{D} \bnfprod\={} \bnfterm{0}\bnfor\bnfterm{1}\bnfor\bnfterm{2}% \bnfor\bnfterm{3}\bnfor\bnfterm{4}\bnfor\bnfterm{5}\bnfor\bnfterm{6}% \bnfor\bnfterm{7}\bnfor\bnfterm{8}\bnfor\bnfterm{9} \end{tabbing} \end{minipage} \end{center} Here, the move number can be checked against the internal move counter if necessary. \section*{Parsing chess moves} A chess move \abbrev{CM} starts with the ``move specification'' (\abbrev{MS}), giving information about the piece that is to move and the source and destination squares. Additionally, an arbitrary number of character tokens may follow as a move comment (\abbrev{MC}). So, after enough ``hints'' for executing the move are collected, i.e.~\abbrev{MS} was ``matched'', the rest of the token is regarded as comment \abbrev{MC} and is output unchanged. \begin{center} \begin{minipage}{3cm} \begin{tabbing} \bnf{CM} \bnfprod\={} \bnf{MS} \bnf{MC}\\ \>\bnfor{} \bnf{MS} \end{tabbing} \end{minipage} \end{center} Before defining the move specification \abbrev{MS} itself a few helping nonterminal symbols are introduced. Please, remember that bold characters within the ``rules'' denote terminal symbols\ldots \begin{enumerate} \item A ``piece'' character (\abbrev{P}) \begin{center} \begin{minipage}{3cm} \begin{tabbing} \bnf{P} \bnfprod\={} \bnfterm{K}\bnfor\bnfterm{Q}\bnfor\bnfterm{B}% \bnfor\bnfterm{N}\bnfor\bnfterm{R} \end{tabbing} \end{minipage} \end{center} Remark: This definition uses the english letters for the single pieces. The real implementation should be independent of the used language. \item A ``file'' character (\abbrev{f}) \begin{center} \begin{minipage}{5cm} \begin{tabbing} \bnf{f} \bnfprod\={} \bnfterm{a}\bnfor\bnfterm{b}\bnfor\bnfterm{c}% \bnfor\bnfterm{d}\bnfor\bnfterm{e}\bnfor\bnfterm{f}\bnfor\bnfterm{g}% \bnfor\bnfterm{h} \end{tabbing} \end{minipage} \end{center} \item A ``rank'' character (\abbrev{r}) \begin{center} \begin{minipage}{3cm} \begin{tabbing} \bnf{r} \bnfprod\={} \bnfterm{1}\bnfor\bnfterm{2}\bnfor\bnfterm{3}% \bnfor\bnfterm{4}\bnfor\bnfterm{5}\bnfor\bnfterm{6}\bnfor\bnfterm{7}% \bnfor\bnfterm{8} \end{tabbing} \end{minipage} \end{center} \item A ``capture'' character (\bnfjoin) \begin{center} \begin{minipage}{3cm} \begin{tabbing} \bnfjoin{} \bnfprod\={} \bnfterm{-}\\ \>\bnfor{} \bnfterm{x}\\ \>\bnfor{} \bnfempty \end{tabbing} \end{minipage} \end{center} where '\bnfempty' denotes the ``empty symbol''. 
\end{enumerate} Move specifications (\abbrev{MS}) can be subdivided into the following groups: \begin{itemize} \item Starting with a ``piece'' character \begin{itemize} \item With source square \begin{center} \begin{minipage}{3cm} \begin{tabbing} \bnf{MS} \bnfprod\={} \bnf{P} \bnf{f} \bnf{r} \bnfjoin{} \bnf{f} \bnf{r}\\ \>\bnfor{} \bnf{P} \bnf{f} \bnfjoin{} \bnf{f} \bnf{r}\\ \>\bnfor{} \bnf{P} \bnf{r} \bnfjoin{} \bnf{f} \bnf{r} \end{tabbing} \end{minipage} \end{center} Examples: ``\texttt{Qf3-f4}'', ``\texttt{Ree6}'', ``\texttt{N1xc3}'' \item Only a destination square present \begin{center} \begin{minipage}{3cm} \begin{tabbing} \bnf{MS} \bnfprod\={} \bnf{P} \bnfjoin{} \bnf{f} \bnf{r} \end{tabbing} \end{minipage} \end{center} Example: ``\texttt{Nxc4}'' \end{itemize} \item Without leading ``piece'' character \begin{itemize} \item With source square \begin{center} \begin{minipage}{3cm} \begin{tabbing} \bnf{MS} \bnfprod\={} \bnf{f} \bnf{r} \bnfjoin{} \bnf{f} \bnf{r} \bnf{P}\\ \>\bnfor{} \bnf{f} \bnfjoin{} \bnf{f} \bnf{r} \bnf{P}\\ \>\bnfor{} \bnf{f} \bnf{r} \bnfjoin{} \bnf{f} \bnf{r}\\ \>\bnfor{} \bnf{f} \bnfjoin{} \bnf{f} \bnf{r} \end{tabbing} \end{minipage} \end{center} Examples: ``\texttt{f7-f8R}'', ``\texttt{dxe8B}'', ``\texttt{g2-g4}'', ``\texttt{fxe6}'' \item Only a destination square present \begin{center} \begin{minipage}{3cm} \begin{tabbing} \bnf{MS} \bnfprod\={} \bnf{f} \bnf{r} \bnf{P}\\ \>\bnfor{} \bnf{f} \bnf{r} \end{tabbing} \end{minipage} \end{center} Examples: ``\texttt{e8R}'', ``\texttt{a6}'' \end{itemize} \item Castlings \begin{center} \begin{minipage}{3cm} \begin{tabbing} \bnf{MS} \bnfprod\={} \bnfterm{O-O-O}\\ \>\bnfor{} \bnfterm{O-O} \end{tabbing} \end{minipage} \end{center} Both of these terminals use the letter 'O' (ASCII code 79\base{10}) and not the digit '0' (ASCII code 48\base{10})! \end{itemize} \section*{Final remark} This text tries to provide only the syntax for the frontend of the new ``move machine''. All the things that happen after the input was ``matched'' and information was drawn out of the given ``chess moves'' to the largest extent, i.e.~the semantic actions, are beyond the scope of this document. So, at the moment it is perfectly legal and syntactically correct to say ``\texttt{4. gxf8K}''\ldots \end{document}
{ "alphanum_fraction": 0.7050408719, "avg_line_length": 27.3540372671, "ext": "tex", "hexsha": "ecc48f79310cf49d1152d8718aeb962eea272457", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2019-07-14T03:52:30.000Z", "max_forks_repo_forks_event_min_datetime": "2019-07-14T03:52:30.000Z", "max_forks_repo_head_hexsha": "31b40a8987d2890a879f604ae10c2faf07f09436", "max_forks_repo_licenses": [ "LPPL-1.3c" ], "max_forks_repo_name": "lehoff/skak", "max_forks_repo_path": "syntax/syntax.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "31b40a8987d2890a879f604ae10c2faf07f09436", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "LPPL-1.3c" ], "max_issues_repo_name": "lehoff/skak", "max_issues_repo_path": "syntax/syntax.tex", "max_line_length": 89, "max_stars_count": 2, "max_stars_repo_head_hexsha": "31b40a8987d2890a879f604ae10c2faf07f09436", "max_stars_repo_licenses": [ "LPPL-1.3c" ], "max_stars_repo_name": "lehoff/skak", "max_stars_repo_path": "syntax/syntax.tex", "max_stars_repo_stars_event_max_datetime": "2022-01-18T05:19:20.000Z", "max_stars_repo_stars_event_min_datetime": "2020-05-25T05:41:26.000Z", "num_tokens": 3176, "size": 8808 }
\section{Conclusions} \label{results} This last Section presents the results obtained on the test sets. We test the best multilayer perceptron and the best convolutional network found. As stated in the first Section, the five test sets are made of folds five, seven, eight, nine, and ten. \subsection{Test set results} \paragraph{Test set preprocessing} The test sets are preprocessed with the same techniques used to extract the training sets.\\ For the MLP, whose training set has 180 features, the same features are extracted from the test audio files and scaled using the pre-fitted StandardScaler. Similarly, each pixel in the test images is scaled using the StandardScaler fitted on the image training set. \paragraph{Results} The MLP model is the fine-tuned one from the hyperparameter tuning phase, trained on the 180-feature dataset, namely the \emph{Extended model}, while the CNN is the best performer among the convolutional models tried, the so-called $C_3$ network with an Adam learning rate of 0.001 and a batch size of 32. Parameters for both models can be seen at the end of Sections \vref{mlp} and \vref{cnn}. \begin{center} \begin{tabular}{ |l|r|r| } \hline Test set & MLP Accuracy & CNN Accuracy\\ \hline Fold 5 & 0.7425 & 0.5171\\ Fold 7 & 0.6038 & 0.5668\\ Fold 8 & 0.6873 & 0.5744\\ Fold 9 & 0.6348 & 0.5515\\ Fold 10 & 0.6858 & 0.5639\\ \hline \end{tabular} \end{center} The mean accuracy and standard deviation are: \begin{center} \begin{tabular}{ |c|r|r| } \hline Model & Mean accuracy & Standard deviation\\ \hline MLP & 0.6708 & 0.0478 \\ CNN & 0.5547 & 0.0202 \\ \hline \end{tabular} \end{center} \subsection{Future work} Results are better for the MLP model, but CNNs show potential on this problem; both could be improved.\\ Indeed, for the MLP a more refined training set could be created by exploiting more features from the Librosa library, testing different types of scalers, and experimenting with different feature selection techniques.\\ Regarding the image dataset, more channels could be considered by using more features as images; also, data augmentation techniques would increase the cardinality of the dataset. Finally, with more computational power, a proper random search could be performed.
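As a concrete illustration of this preprocessing step (the snippet below is a sketch added here for clarity; the variable names and array shapes are placeholders and are not taken from the project code), the scaler is fitted on the training features only and then reused unchanged on the test folds:

\begin{verbatim}
import numpy as np
from sklearn.preprocessing import StandardScaler

X_train = np.random.rand(100, 180)  # placeholder for the 180-feature training set
X_test = np.random.rand(20, 180)    # placeholder for one test fold

scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)  # fit only on the training data
X_test_scaled = scaler.transform(X_test)        # reuse the fitted scaler on the test fold
\end{verbatim}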
{ "alphanum_fraction": 0.7204968944, "avg_line_length": 39.5901639344, "ext": "tex", "hexsha": "60a538a93828b1b877b900e670b3a117ddfde7c5", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2022-02-08T22:16:39.000Z", "max_forks_repo_forks_event_min_datetime": "2022-02-08T22:16:39.000Z", "max_forks_repo_head_hexsha": "9516e9a4f6ed3af2c5847c13321f8c0624ff827d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "tomfran/urban-sound-classification", "max_forks_repo_path": "report/chapters/results.tex", "max_issues_count": 1, "max_issues_repo_head_hexsha": "9516e9a4f6ed3af2c5847c13321f8c0624ff827d", "max_issues_repo_issues_event_max_datetime": "2021-11-17T10:16:19.000Z", "max_issues_repo_issues_event_min_datetime": "2021-11-17T10:16:19.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "tomfran/urban-sound-classification", "max_issues_repo_path": "report/chapters/results.tex", "max_line_length": 99, "max_stars_count": 1, "max_stars_repo_head_hexsha": "9516e9a4f6ed3af2c5847c13321f8c0624ff827d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "tomfran/urban-sound-classification", "max_stars_repo_path": "report/chapters/results.tex", "max_stars_repo_stars_event_max_datetime": "2022-02-08T22:33:40.000Z", "max_stars_repo_stars_event_min_datetime": "2022-02-08T22:33:40.000Z", "num_tokens": 634, "size": 2415 }
\section{Confidence intervals} Confidence Intervals follow the form:\\ (statistic) $\pm$ (critical value)(estimated standard deviation of statistic)\\ Let $\displaystyle ( E,(\mathbb{P}_{\theta })_{\theta \in \Theta })$ be a statistical model based on observations $X_{1} , \ldots , X_{n}$ and assume $\displaystyle \Theta \subseteq \mathbb{R}$. Let $\displaystyle \alpha \in ( 0,1)$.\\ \textbf{Non-asymptotic} confidence interval of level $\displaystyle 1-\alpha $ for $\displaystyle \theta $:\\ Any random interval $\displaystyle \mathcal{I}$, depending on the sample $X_{1} , \ldots , X_{n}$ but not on $\displaystyle \theta $, and such that:\\ $\mathbb{P}_{\theta }[\mathcal{I} \ni \theta ] \geq 1-\alpha ,\ \ \forall \theta \in \Theta$\\ Confidence interval of \textbf{asymptotic level} $\displaystyle 1-\alpha $ for $\displaystyle \theta $:\\ Any random interval $\displaystyle \mathcal{I}$ whose boundaries do not depend on $\displaystyle \theta $ and such that: $\lim _{n\rightarrow \infty }\mathbb{P}_{\theta } [\mathcal{I} \ni \theta ]\geq 1-\alpha ,\ \ \forall \theta \in \Theta $ \subsection{Two-sided asymptotic CI} Let $X_1, \ldots, X_n = \tilde{X}$ and $\tilde{X}\stackrel{iid} {\sim} P_{\theta}$. A two-sided CI is a function of $\tilde{X}$ giving an upper and a lower bound between which the parameter lies, $\mathcal{I} = [l(\tilde{X}),u(\tilde{X})]$, with a certain probability $\mathbb{P}(\theta \in \mathcal{I}) \geq 1-\alpha$ and, conversely, $\mathbb{P}(\theta \not\in \mathcal{I}) \leq \alpha$.\\ Since the estimator is a r.v. depending on $\tilde{X}$, it has a variance $Var(\hat{\theta}_n)$ and a mean $\mathbb{E}[\hat{\theta}_n]$. Since the CLT is valid for every distribution, standardizing and rearranging the expression yields an asymptotic CI: \begin{align*} \mathcal{I} = [&\hat{\theta}_n - \frac{q_{\alpha /2} \sqrt{Var(X_i)} }{\sqrt{n}},\\ &\hat{\theta}_n + \frac{q_{\alpha /2} \sqrt{Var(X_i)} }{\sqrt{n}}] \end{align*} This expression depends on the true variance $Var(X_i)$ of the r.v.s, so the variance has to be estimated.\\ Three possible methods: plug-in (plug the sample mean into the variance formula, or use the empirical variance), solve (solve the resulting quadratic inequality), conservative (use the theoretical maximum of the variance). \subsection{Sample Mean and Sample Variance} Let $X_1, ..., X_n \stackrel{iid}{\sim} P_{\mu}$, where $E(X_i)=\mu$ and $Var(X_i)=\sigma^2$ for all $i=1,2,...,n$.\\ \textbf{Sample Mean:} \begin{align*} \bar{X}_n= \frac{1}{n} \sum_{i=1}^{n} X_i \end{align*} \textbf{Sample Variance:} \begin{align*} S_n &= \frac{1}{n} \sum_{i=1}^{n} (X_i - \bar{X}_n)^2\\ &= \frac{1}{n} (\sum_{i=1}^{n} X_i^2) - \bar{X}_n^2 \end{align*} \textbf{Unbiased estimator of sample variance:} \begin{align*} \tilde{S}_n &= \displaystyle \frac{1}{n-1} \sum _{i=1}^ n \left(X_ i - \overline{X}_ n\right)^2\\ &= \frac{n}{n-1} S_n \end{align*} \subsection{Delta Method} Used to find the asymptotic CI when the estimator is a function of the mean. The goal is to obtain the limiting distribution of a function of the sample mean using the CLT. Let $Z_n$ be a sequence of r.v.s such that $\sqrt{n} (Z_n-\theta) \xrightarrow[n \rightarrow \infty]{(d)} N(0,\sigma^2)$ and let $g: \mathbb{R}\longrightarrow \mathbb{R}$ be continuously differentiable at $\theta$; then: \begin{align*} &\sqrt{n}(g(Z_n) - g(\theta)) \xrightarrow [n \to \infty ]{(d)}\\ &\mathcal{N}(0, g'(\theta )^2 \sigma ^2) \end{align*} \textbf{Example:} let $X_1,\ldots,X_n \stackrel{iid}{\sim} \textsf{exp}(\lambda)$ where $\lambda>0$. Let $\overline{X}_ n= \frac{1}{n} \sum _{i = 1}^ n X_ i$ denote the sample mean. 
By the CLT, we know that $\sqrt{n}\left(\overline{X}_ n - \frac{1}{\lambda }\right) \xrightarrow [n \to \infty ]{(d)} N(0, \sigma ^2)$ for some value of $\sigma^2$ that depends on $\lambda$. If we set $g: \displaystyle \mathbb {R} \to \mathbb {R}$, $\displaystyle x \mapsto 1/x$, then by the Delta method: \begin{align*} &\sqrt{n}\left( g(\overline{X}_ n) - g\left(\frac{1}{\lambda }\right) \right)\\ &\xrightarrow [n \to \infty ]{(d)} N(0, g'(E[X])^2\textsf{Var}(X))\\ &\xrightarrow [n \to \infty ]{(d)} N(0, g'\left(\frac{1}{\lambda }\right)^2\frac{1}{\lambda ^2})\\ &\xrightarrow [n \to \infty ]{(d)} N(0, \lambda^2) \end{align*}
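Plugging the estimate $\hat{\lambda } = 1/\overline{X}_ n$ into the asymptotic variance $\lambda^2$ (this concluding step is added for illustration; it simply combines the limit above with the plug-in method described earlier) gives a two-sided CI of asymptotic level $1-\alpha$ for $\lambda$: \begin{align*} \mathcal{I} = \left[\hat{\lambda } - \frac{q_{\alpha /2}\, \hat{\lambda }}{\sqrt{n}},\ \hat{\lambda } + \frac{q_{\alpha /2}\, \hat{\lambda }}{\sqrt{n}}\right] \end{align*}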
{ "alphanum_fraction": 0.6657888437, "avg_line_length": 75.9454545455, "ext": "tex", "hexsha": "2b66c4f25029fac917ae19fe3f6f69b21580941d", "lang": "TeX", "max_forks_count": 14, "max_forks_repo_forks_event_max_datetime": "2022-02-12T10:41:57.000Z", "max_forks_repo_forks_event_min_datetime": "2020-03-30T21:12:37.000Z", "max_forks_repo_head_hexsha": "51d644cdecabb3cb7c8dedc5816359ba641a3d19", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "tony-ml/MITx_capstone_1", "max_forks_repo_path": "content/Confidence_intervals.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "51d644cdecabb3cb7c8dedc5816359ba641a3d19", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "tony-ml/MITx_capstone_1", "max_issues_repo_path": "content/Confidence_intervals.tex", "max_line_length": 405, "max_stars_count": 25, "max_stars_repo_head_hexsha": "51d644cdecabb3cb7c8dedc5816359ba641a3d19", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "blechturm/MITx_capstone_1", "max_stars_repo_path": "content/Confidence_intervals.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-31T18:11:56.000Z", "max_stars_repo_stars_event_min_datetime": "2020-03-30T18:06:22.000Z", "num_tokens": 1500, "size": 4177 }
% Make sure we don't inherit an xtabular definition from a previous chapter \tablehead{} \tablefirsthead{} \tabletail{} \tablelasttail{} \chapter{Adding Functions and Data Types} Icon is designed so that new functions and data types can be added with comparative ease. This appendix provides some guidelines for modifying the Icon run-time system and lists useful macro definitions and support routines. It is designed to be read in conjunction with the source code for the implementation. The material included here only touches on the possibilities. There is no substitute for actually implementing new features and spending time studying the more intricate parts of the Icon system. This appendix provides an action summary of how to extend the language implementation. For non-trivial additions, the reader should consult Appendix G which contains a detailed description of the RTL language in which the run-time system is written. \section{File Organization} The Icon system is organized in a hierarchy. Under UNIX, the root of the Icon hierarchy is called icon.git/trunk {\color{blue} (the Unicon root directory is called unicon)} and may be located anywhere. Neither Icon nor Unicon depends on the name of the root directory and, for other operating systems, it may be named differently. The {\it root} directory has several subdirectories that contain source code, test programs, documents, and so forth. The source code is in {\it root}/src. The subdirectories of src are shown below. The subdirectory h holds header files that are included by files in the other subdirectories. The file \textfn{h/rt.h} contains most of the definitions and declarations used in the run-time system. The rest of the code related to the run-time system is in the subdirectory \textfn{runtime}. Source file extensions in this directory are \texttt{.r} and \texttt{.ri} for ``run-time'' and ``run-time include'' files which are processed by rtt. First letters of files in this subdirectory indicate the nature of their contents. Files that begin with the letter f contain code for functions, while files that begin with o contain code for operators. Code related directly to the interpretive process is in files that begin with the letter i. ``Library'' routines for operations such as list construction that correspond to virtual machine instructions are in files that begin with the letter l. Finally, files that begin with the letter r hold run-time support routines. Within each category, routines are grouped by functionality. For example, string construction functions such as map are in \textfn{fstr.r}, while storage allocation and garbage collection routines are in \textfn{rmemmgt.r}. A simplified file structure is shown below. The emphasis is on directories that contain source or documentation. Directories that are common to both Icon and Unicon are in black; Icon specific directories are shown in grey; Unicon specific directories are shown in blue. In some cases there is additional sub-structure not shown here. 
\noindent\index{Icon Directory structure}\index{Unicon Directory structure} \begin{picture}(450,370)(0,40) %\put(0,40){\graphpaper{45}{37}} {\thicklines \put(0,205){\makebox(80,20){Icon.git/trunk}} \put(0,185){\color{blue}\makebox(80,20){unicon}} \put(0,205){\line(1,0){80}} \put(80,100){\line(0,1){280}} \put(110,370){\makebox(0,20)[l]{bin}} \put(150,370){\makebox(0,20)[l] {\parbox{100pt}{\em Executable binaries and support files}}} \put(110,330){\makebox(0,20)[l]{config}} \put(150,330){\makebox(0,20)[l]{\parbox{70pt}{\em Configuration directories}}} \put(110,290){\makebox(0,20)[l]{doc}} \put(150,290){\makebox(0,20)[l]{\parbox{100pt}{\em Documents}}} \put(110,250){\makebox(0,20)[l]{ipl}} \put(150,250){\makebox(0,20)[l]{\parbox{70pt}{\em Icon program library}}} {\color[rgb]{0.5,0.5,0.5} \put(110,210){\makebox(0,20)[l]{man}} \put(150,210){\makebox(0,20)[l]{\parbox{100pt}{\em Man pages}}} }%grey \put(110,170){\makebox(0,20)[l]{src}} \put(130,180){\line(1,0){130}} \put(110,130){\makebox(0,20)[l]{tests}} \put(150,130){\parbox[l]{80pt} {\em benchmarks, C~interface~tests, general~tests etc.}} {\color{blue} \put(110,90){\makebox(0,20)[l]{uni}} \put(150,90){\makebox(0,20)[l]{\parbox{80pt}{Unicon library}}} }%blue \multiput(80,100)(0,40){8}{\line(1,0){20}} %src tree \begin{picture}(0,0)(0,30) \put(260,80){\line(0,1){300}} \multiput(260,80)(0,30){11}{\line(1,0){20}} \put(290,370){\makebox(0,20)[l]{common}} \put(350,370){\makebox(0,20)[l]{\em common source}} \put(290,340){\makebox(0,20)[l]{h}} \put(350,340){\makebox(0,20)[l]{\em header files}} \put(290,310){\makebox(0,20)[l]{\color{blue}iconc}} \put(350,310){\makebox(0,20)[l]{\color{blue}\em Icon compiler source}} \put(290,280){\makebox(0,20)[l]{icont}} \put(350,280){\makebox(0,20)[l]{\em Icon translator source}} {\color{blue} \put(290,250){\makebox(0,20)[l]{lib}} \put(350,250){\makebox(0,20)[l]{ \parbox{100pt}{\em Additional libraries for some platforms}}} \put(290,220){\makebox(0,20)[l]{libtp}} \put(350,220){\makebox(0,20)[l]{ \parbox{120pt}{\em transfer protocol library}}} }%blue \put(290,190){\makebox(0,20)[l]{preproc}} \put(350,190){\makebox(0,20)[l]{\em preprocessor source}} \put(290,160){\makebox(0,20)[l]{rtt}} \put(350,160){\makebox(0,20)[l]{\em run-time translator source}} \put(290,130){\makebox(0,20)[l]{runtime}} \put(350,130){\makebox(0,20)[l]{\em run-time source}} {\color[rgb]{0.5,0.5,0.5} \put(290,100){\makebox(0,20)[l]{wincap}} \put(350,100){\makebox(0,20)[l]{\em Wincap library source}} }%grey \put(290,70){\makebox(0,20)[l]{xpm}} \put(350,70){\makebox(0,20)[l]{\em xpm library source}} \end{picture}% src tree }% thicklines \end{picture} \section{Adding Functions} There are several conventions and rules of protocol that must be followed in writing a new function. The situations that arise most frequently are covered in the following sections. The existing functions in f files in \textfn{runtime} provide many examples to supplement the information given here. \subsection{Function Declarations} A function begins with the RTL header \texttt{function\{...\} name(args)}, where name is the name of the function as it is called in a source-language program. For example, \iconline{ function\{1\} map(s1,s2,s3) } \noindent appears at the beginning of the function map. This header declares the procedure block for the function and includes information about its number of results (in curly brackets) and the number and names of its parameters. See Appendix G. 
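Informally, the figure in curly brackets summarizes the length of the function's result sequence. The glosses below are only a quick orientation (Appendix G gives the precise notation); the function names are simply those of the examples that appear later in this appendix:

\goodbreak
\begin{iconcode}
function\{1\}\ \ \ idem(x)\ \ \ \ /* exactly one result */\\
function\{0,1\} pos(i)\ \ \ \ \ \ /* at most one result; the function may fail */\\
function\{2\}\ \ \ gen2(x,y)\ \ /* two results, produced by suspending */\\
function\{*\}\ \ \ gen(x[n])\ \ /* an arbitrary number of results (a generator) */
\end{iconcode}
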
A \texttt{Z} is prepended to the name given to avoid a collision with the names of other C routines in the run-time system. Thus, the C function that implements map is named \texttt{Zmap}. Although the Icon function map has three arguments, the corresponding C function has only one: \texttt{r\_args}, which is a pointer to an array of descriptors on the interpreter stack. For example, \texttt{function \{1\} map(s1,s2,s3)} generates

\goodbreak
\begin{iconcode}
\ \ int Zmap(r\_args)\\
\ \ dptr r\_args;
\end{iconcode}

\noindent A new function must also be listed via a one-line macro in \texttt{src/h/fdefs.h}. For a fixed number of arguments the macro is \texttt{FncDef(name,nargs)} and for a variable number of arguments the macro is \texttt{FncDefV(name)}.

\subsection{Returning from a Function}

A function returns control to the interpreter by use of one of three reserved words \texttt{return}, \texttt{suspend}, or \texttt{fail}, depending on whether the function returns, suspends, or fails, respectively. \texttt{return} and \texttt{fail} return codes that the interpreter uses to differentiate between the two situations. \texttt{suspend} returns control to the interpreter by calling it recursively, as described in Sec. 9.3.

The use of \texttt{return} is illustrated by the following trivial function that simply returns its argument:

\goodbreak
\begin{iconcode}
function\{1\} idem(x)\\
abstract \{ return type(x) \} \\
body \{\\
\>return x;\\
\> \} \\
end
\end{iconcode}

\noindent For example,

\iconline{ \ \ write(idem("hello")) }

\noindent writes \texttt{hello}.

The use of \texttt{suspend} and \texttt{fail} is illustrated by the following function, which generates its first and second arguments in succession:

\goodbreak
\begin{iconcode}
function\{2\} gen2(x,y)\\
abstract \{ return type(x) ++ type(y) \} \\
body \{\\
\>suspend x;\\
\>suspend y;\\
\>fail;\\
\}
\end{iconcode}

\noindent For example,

\iconline{every write(gen2("hello", "there"))}

\noindent writes

\goodbreak
\begin{iconcode}
\ \ hello\\
\ \ there
\end{iconcode}

\noindent As illustrated previously, \texttt{fail} is used when there is not another result to produce. It is safe to assume that arguments are intact when the function is resumed to produce another result.

Most functions have a fixed number of arguments. A number of functions such as \texttt{write}, \texttt{writes}, and \texttt{stop} in the standard Icon repertoire can be called with an arbitrary number of arguments. For a function that can be called with an arbitrary number of arguments, the final RTL argument is written with square brackets enclosing a name for the number of arguments, as in \texttt{function\{1\} name(x[nargs])}. When this format is used, the function is called with two arguments: the number of arguments in the call and a pointer to the corresponding array of descriptors. For example, \texttt{function\{1\} write(x[nargs])} generates

\goodbreak
\begin{iconcode}
Zwrite(r\_nargs, r\_args)\\
int r\_nargs;\\
dptr r\_args;
\end{iconcode}

Within such a function, the arguments are referenced using array syntax.
For example, a function that takes an arbitrary number of arguments and suspends with them as values in succession is

\goodbreak
\begin{iconcode}
function\{*\} gen(x[n])\\
{\color{red} abstract \{ return type(x[]) \} /* probably doesn't work */ } \\
body \{\\
\>register int i;\\
\>for (i = 0; i < n; i++) \{\\
\>\>suspend x[i];\\
\>\}\\
\>fail;\\
\}
\end{iconcode}

\noindent For example,

\iconline{ \ \ every write(gen("hello","there","!")) }

\noindent writes

\goodbreak
\begin{iconcode}
\ \ hello\\
\ \ there\\
\ \ !
\end{iconcode}

\noindent Note the use of \texttt{fail} at the end of the function; the omission of \texttt{fail} would be an error, since returning by flowing off the end of the function would not provide the return code that the interpreter expects.

\subsection{Type Checking and Conversion}

Some functions need to perform different operations, depending on the types of their arguments. An example is \texttt{type(x)}:

\goodbreak
\begin{iconcode}
function\{1\} type(x)\\
abstract \{ return string \} \\
body \{\\
\>type\_case x of \{\\
\>\> string: inline \{ return C\_string "string"; \} \\
\>\> null: inline \{ return C\_string "null"; \} \\
\>\> integer: inline \{ return C\_string "integer"; \} \\
\>\> real: inline \{ return C\_string "real"; \} \\
\>\> cset: inline \{ return C\_string "cset"; \} \\
\>\> \vdots \\
\>\} \\
\end{iconcode}

\noindent Icon values are stored in descriptors, wherein the d-word serves to differentiate between types. Strings require a separate test prior to examining the d-word's type code, but this is abstracted by the RTL \texttt{type\_case} construct.

For most functions, arguments must be of a specific type. As described in Sec. 12.1, type conversion routines are used for this purpose. For example, the function \texttt{pos(i)} requires that \texttt{i} be an integer. It begins as follows:

\goodbreak
\begin{iconcode}
function\{0,1\} pos(i)\\
\> if !cnv:C\_integer(i) then runerr(101, i); \\
\> abstract \{ return integer \} \\
body \{\\
\> \vdots
\end{iconcode}

\noindent The RTL syntax for type conversion and defaulting turns around and calls underlying C functions such as \texttt{cnv\_int()} which are defined in \texttt{src/runtime/cnv.r}. If the conversion is successful, the resulting integer is assigned to \texttt{i}. As indicated by this example, it is the responsibility of a function to terminate execution by calling \texttt{runerr} if a required conversion cannot be made.

Two string conversion functions are available. \texttt{cnv\_str()} allocates space for the converted string from the heap, while \texttt{cnv\_tstr()} requires a buffer, which is supplied by the routine that calls it. See Sec. 4.4.4. This buffer must be large enough to hold the longest string that can be produced by the conversion of any value. This size is given by the defined constant \texttt{MaxCvtLen}. For example, the string-analysis function \texttt{match(s1,s2,i,j)} begins as follows:

\goodbreak
\begin{iconcode}
function\{0,1\} match(s1,s2,i,j)\\
\> \vdots \\
\> if !cnv:tmp\_string(s1) then runerr(103,s1)
\end{iconcode}

for which the generated C code looks like

\begin{iconcode}
int Zmatch(dptr r\_args)\\
\> \vdots \\
\> char r\_sbuf[1][MaxCvtLen]; \\
\> \vdots \\
\> if (!cnv\_tstr(r\_sbuf[0], \&(r\_args[1]), \&(r\_args[1]))) \{ \\
\{ err\_msg(103, \&(r\_args[1])); \} \}
\end{iconcode}

\noindent The buffer is used only if a nonstring value is converted to a string. In this case, \texttt{r\_args[1]} is changed to a qualifier whose v-word points to the converted string in \texttt{r\_sbuf[0]}.
This string does not necessarily begin at the beginning of \texttt{r\_sbuf[0]}. In any event, after a successful call to \texttt{cnv\_tstr}, the argument is an appropriate qualifier, regardless of whether a conversion actually was performed. \subsection{Constructing New Descriptors} Some functions need to construct new descriptors to return. RTL is aware that functions return descriptors and given type information, its return statement will construct a descriptor for you. As given in the function \texttt{type()} previously, for example, to return a qualifier for the string \texttt{"integer"}, the following code suffices: \begin{iconcode} return C\_string "integer"; \end{iconcode} Sometimes it is convenient to explicitly construct a descriptor by assignment to its d- and v-words. Various macros are provided to simplify these assignments. \texttt{StrLen()} and \texttt{StrLoc()} can be used to construct a qualifier. \goodbreak \begin{iconcode} StrLen(result) = 7;\\ StrLoc(result) = "integer";\\ return result; \end{iconcode} \noindent In these last two examples, the returned qualifier points to a statically allocated C string. There also are macros and support routines for constructing certain kinds of descriptors. For example, the macro \iconline{MakeStr("integer", 7, \&result); } accomplishes the sames as the two assignments to StrLoc() and StrLen() in the preceding example, and \iconline{MakeInt(i, \&result); } \noindent constructs an integer descriptor containing the integer \texttt{i} in the descriptor \texttt{result}. \subsection{Default Values} Many functions specify default values for null-valued arguments. There are RTL constructs for providing default values. For example, \iconline{ def:tmp\_string(arg, q) } \noindent changes \texttt{arg} to the string given by the qualifier \texttt{q} in case \texttt{arg} is null-valued. If \texttt{arg} is not null-valued, however, its value is converted to a string, if possible, by \texttt{def\_tstr}. If this is not possible, the function should terminate execution with an error message. \subsection{Storage Allocation} Functions that construct new data objects often need to allocate storage. Allocation is done in the allocated string region or the allocated block region, depending on the nature of the object. Support routines are provided to perform the actual allocation. As mentioned in Sec. 11.4, under certain circumstances such as to guarantee that multiple string allocations are adjacent during a concatenation, predictive need requests may be required before storage is actually allocated. The function \texttt{reserve(r, i)} requests \texttt{i} bytes of contiguous storage in region \texttt{r}. Such a request generally should be made as soon as an upper bound on the amount of storage needed is known. It is not necessary to know the exact amount, but the amount requested must be at least as large as the amount that actually will be allocated. For example, the function \texttt{reads(f, i)} requests \texttt{i} bytes of string storage, although the string actually read may be shorter. \textbf{String Allocation}. The function \texttt{alcstr(s, i)} copies \texttt{i} bytes starting at \texttt{s} into the allocated string region and returns a pointer to the beginning of the copy. 
For example, a function \texttt{double(s)} that produces the concatenation of \texttt{s} with itself is written as follows:

\goodbreak
\begin{iconcode}
function\{1\} double(s)\\
abstract \{ return string \} \\
if !cnv:str(s) then runerr(103, s) \\
body \{\\
\>register int slen = StrLen(s);\\
\>reserve(Strings, 2 * slen);\\
\>StrLen(result) = 2 * slen;\\
\>StrLoc(result) = alcstr(StrLoc(s), slen);\\
\>alcstr(StrLoc(s), slen);\\
\>return result;\\
\}
\end{iconcode}

If the first argument of \texttt{alcstr} is \texttt{NULL}, instead of being a pointer to a string, the space is allocated and a pointer to the beginning of it is returned, but nothing is copied into the space. This allows a function to construct a string directly in the allocated string region.

If a string to be returned is in a buffer as a result of conversion from another type, care must be taken to copy this string into the allocated string region --- otherwise the string in the buffer will be overwritten on subsequent calls. Copying such strings is illustrated by the function \texttt{string(x)} given in Sec. 12.1.

\textbf{Block Allocation}. The block allocation code all lives in \textfn{src/runtime/ralc.r}. Macros \texttt{AlcFixBlk(var,structname,typecode)} and \texttt{AlcVarBlk(var,structname,typecode,numdescriptors)} allocate fixed and variable-sized blocks, respectively. They are generally used internally by run-time support routines for allocating each kind of block. Such support routines generally initialize the block as well. For example, \texttt{alccset()} allocates a block for a cset, fills in the title and size words, and zeroes the bits for the cset:

\goodbreak
\begin{iconcode}
struct b\_cset *alccset()\\
\{\\
\>register struct b\_cset *blk;\\
\>register int i;\\
\> \vdots \\
\>AlcFixBlk(blk, b\_cset, T\_Cset); \\
\>blk->size = -1; /* flag size as not yet computed */ \\
\\
\>/*\\
\>\ * Zero the bit array.\\
\>\ */\\
\>for (i = 0; i < CsetSize; i++)\\
\>\>blk->bits[i] = 0;\\
\>return blk;\\
\}
\end{iconcode}

\noindent See Sec. D.5.5 for a complete list of block-allocation functions.

\subsection{Storage Management Considerations}

In addition to assuring that predictive need requests are made before storage is allocated, it is essential to assure that all descriptors contain valid data at any time a garbage collection may occur, that all descriptors are accessible to the garbage collector, and that all pointers to allocated data are in the v-words of descriptors.

Normally, all the descriptors that a function uses are on the interpreter stack. Such descriptors are processed by the garbage collector. Occasionally, additional descriptors are needed for intermediate computations. If such descriptors contain pointers in their v-words, and if a function body allocates storage or suspends control to code that may allocate storage (potentially triggering a garbage collection), it is \textit{not} correct to declare local descriptors, as in

\goodbreak
\begin{iconcode}
function\{1\} mesh(x,y)\\
\> \vdots \\
body \{\\
\>struct descrip d1, d2; \\
\> \vdots \\
\> subsequent code that may allocate memory, or suspend
\end{iconcode}

\noindent The problem with this approach is that \texttt{d1} and \texttt{d2} are on the C stack and the garbage collector has no way of knowing about them.
RTL syntax provides the \texttt{tended} keyword for this situation: \begin{iconcode} function\{1\} mesh(x,y) \> \vdots \\ body \{\\ \> tended struct descrip d1, d2; \\ \> \vdots \\ \> subsequent code that may allocate memory, or suspend \end{iconcode} Garbage collection can occur only during an allocation or predictive need request. However, this can occur between the time a function suspends and the time it is resumed to produce another result. Consequently, if a pointer is kept in a C variable in a loop that is producing results by suspending, the pointer may be invalid when the function is resumed. Instead, the pointer should be kept in the v-word of a descriptor that is tended, i.e. accessible to the garbage collector. \subsection{Error Termination} An Icon program may terminate abnormally for two reasons: as the result of a source-language programming error (such as an invalid type in a function call), or as a result of an error detected in the Icon system itself (such as a descriptor that should have been dereferenced but was not). In case a source-language error is detected, execution is terminated by a call of the form \iconline{runerr(i, \&d); } \noindent where \texttt{i} is an error message number and \texttt{d} is the descriptor for the offending value. If there is no specific offending value, the second argument is 0. The array of error message numbers and corresponding messages is contained in \textfn{runtime/data.r}. If there is no appropriate existing error message, a new one can be added, following the guidelines given in Appendix G of Griswold and Griswold 1990. In theory, there should be no errors in the Icon system itself, but no large, complex software system is totally free of errors. Some situations are recognizable as being potential sources of problems in case data does not have the expected values. In such situations, especially during program development, it is advisable to insert calls of the function \texttt{syserr}, which terminates execution, indicating that an error was detected in the Icon system, and prints its argument as an indication of the nature of the error. It is traditional to use calls of the form \iconline{syserr("mesh: can't happen"); } \noindent so that when, in fact, the ``impossible'' does happen, there is a reminder of human frailty. More informative messages are desirable, of course. \subsection{Header Files} For RTL .r files, the necessary header files normally will be included automatically. Several of them are processed by rtt itself; its input automatically includes src/h/grttin.h which includes several of the header files, special macros, and a bunch of {\em ersatz\/} typedef's to fool rtt into parsing successfully without the system includes. The rest of the includes actually happen during the C compile on rtt's output since system includes may use compiler-specific extensions that rtt would not know about. rtt writes an include to src/h/rt.h to its output files to be processed by the C preprocessor. % The redefinition of arraystretch works as a way of reducing inter-row % spacing in the table, but maybe we should find a more elegant solution. {\renewcommand{\arraystretch}{0.9}% \noindent The header file rt.h includes the full suite of header files:\\[1ex] \noindent\hspace{0.5cm}% Indent the entire table % The magic spell ">{\textfn\bgroup} ... <{\egroup}" sets the font % for the first column. Just showing off for a three row table, % but it will come in handy later on in the chapter. 
\begin{xtabular}{>{\textfn\bgroup}l<{\egroup}@{\hspace{1cm}}p{11cm}}
../h/define.h & platform specific definitions\\
../h/arch.h & (Icon only) automatically generated word size definitions\\
../h/config.h & general configuration information\\
../h/sys.h & system include files (\textfn{stdio.h} etc.)\\
../h/typedefs.h & type definitions for the run-time system\\
../h/cstructs.h & structure definitions and constants for the functions in the \textfn{src/common} directory\\
%
../h/mproto.h & (Icon only) prototypes for functions common to several modules\\
%
{\color{blue}../h/proto.h} &%
% Without the \hspace{0cm} we get extra _vertical_ space between columns: go figure.
% It only happens when the \color{blue} is added.
{\hspace{0cm}\color{blue}(Unicon only) prototypes for library functions}\\
%
../h/cpuconf.h & definitions that depend on the computer word size\\
{\color{blue}../h/monitor.h} &%
{\hspace{0cm}\color{blue} (Unicon only) event code definitions}\\
../h/rmacros.h & macros and manifest constants\\
../h/rstructs.h & runtime data structures\\
../h/graphics.h & graphics facilities (if configured) \\
../h/audio.h & audio facilities (Unicon only; if configured) \\
../h/posix.h & posix facilities (Unicon only) \\
../h/messagin.h & messaging facilities (Unicon only) \\
../h/rexterns.h & extern declarations for global variables \\
../h/rproto.h & runtime system function prototypes
\end{xtabular}
}\\[1ex] % arraystretch is now back to normal
\noindent All of these files contain appropriate information for the local installation, and no changes in them should be needed.

\subsection{Installing a New Function}

Both the linker and the run-time system must know the names of all functions. This information is provided in the header file \textfn{h/fdefs.h}. In order to add a function, a line of the form

\iconline{FncDef(name,nargs) }

\noindent (or \texttt{FncDefV(name)} for a function that takes a variable number of arguments) must be inserted in \textfn{h/fdefs.h} in proper alphabetical order. Once this insertion is made, the Icon system must be recompiled to take into account the code for the new function. The steps involved in recompilation vary from system to system. Information concerning recompilation is available in system-specific installation documents.

\section{Adding Data Types}

Adding a new data type is comparatively simple, although there are several places where changes need to be made. Failure to make all the required changes can produce mysterious bugs.

\subsection{Type Codes}

At present, type codes range from 0 to 25 {\color{blue} (31 for Unicon)}. Every type must have a distinct type code and corresponding definitions. These additions are made in \textfn{h/rmacros.h}. First, a \texttt{T\_} definition is needed. For example, if a Boolean type is added, a definition such as

\iconline{\#define T\_Boolean\ \ \ 26 }

\noindent is needed. The value of \texttt{MaxType}, which immediately follows the type code definitions, must be increased to 26 accordingly. Failure to set \texttt{MaxType} to the maximum type code may result in program malfunction during garbage collection. See Sec. 11.3.2.

Next a \texttt{D\_} definition is needed for the d-word of the new type. For a Boolean type, this definition might be

\iconline{\#define D\_Boolean\ \ (T\_Boolean | F\_Nqual) }

\noindent All nonstring types have the \texttt{F\_Nqual} flag and their \texttt{T\_} type code. Types whose v-words contain pointers also have the \texttt{F\_Ptr} flag.

\subsection{Structures}

A value of a Boolean type such as the one suggested previously can be stored directly in the v-word of its descriptor.
However, most types contain pointers to blocks in their v-words. In this case, a declaration of a structure corresponding to the block must be added to \textfn{h/rstructs.h}. For example, a new rational number data type, with the type code \texttt{T\_Rational}, might be represented by a block containing two descriptors, one for the numerator and one for the denominator. An appropriate structure declaration for such a block is

\goodbreak
\begin{iconcode}
struct b\_rational \{\\
\>int title;\\
\>struct descrip numerator;\\
\>struct descrip denominator;\\
\};
\end{iconcode}

Since rational blocks are fixed in size, no size field is needed. However, a vector type with code \texttt{T\_Vector} in which different vectors have different lengths needs a size field. The declaration for such a block might be

\goodbreak
\begin{iconcode}
struct b\_vector \{\\
\>int title;\\
\>int blksize;\\
\>struct descrip velems[1];\\
\};
\end{iconcode}

\noindent As mentioned in Sec. 4.4.2, the size of one for the array of descriptors is needed to avoid problems with C compilers. In practice, this structure conceptually overlays the allocated block region, and the number of elements varies from block to block.

Any new structure declaration for a block must be added to the declaration of the union \texttt{block} in \textfn{h/rstructs.h}. The order of the fields in a new structure must follow the rules (discussed next) imposed by the garbage collector.

\subsection{Information Needed for Storage Management}

In Icon version 8 and later, allocated data may be referred to in two ways: either (as before) in the v-word of a descriptor, or in one of the pointers in a structure. All pointers to allocated data must be contained either in the v-words of descriptors or in the pointer fields of blocks, since these are the only places the garbage collector can locate them. Furthermore, all non-descriptor data (including pointers) must precede any descriptors in a block. The amount of non-descriptor data, and hence the location of the first descriptor in a block, must be the same for all blocks of a given type. Pointers add new constraints: if any pointers are present, they must be in one contiguous group and must precede the first descriptor in the block, although they need not be placed immediately before the descriptors. If any block has a variable number of pointers, they must be placed at the end of the block (and there can be no descriptors in the block).

As described in Sec. 11.3.2, the garbage collector uses the array \texttt{bsizes} to determine the size of a block and the array \texttt{firstd} to determine the offset of the first descriptor in the block. These arrays are in \textfn{rmemmgt.r}. When a new data type is added, appropriate entries must be made in these arrays. Failure to do so may result in serious bugs that occur only in programs that perform garbage collection, and the symptoms may be mysterious.

There is an entry in \texttt{bsizes} for each type code. If the type has no block, the entry is -1. If the type has a block of constant size, the entry is the size of the block. Otherwise, the entry is 0, indicating that the size is in the second word of the block. Thus, the entry for \texttt{T\_Boolean} would be -1, the entry for \texttt{T\_Rational} would be \texttt{sizeof(struct b\_rational)}, and the entry for \texttt{T\_Vector} would be 0.

There is a corresponding entry in \texttt{firstd} for each type code that gives the offset of the first descriptor in its corresponding block. If there is no block, the entry is -1.
If the block contains no descriptors, the entry is 0. For example, the entry for \texttt{T\_Boolean} would be -1, the entry for \texttt{T\_Rational} would be \texttt{WordSize}, and the entry for \texttt{T\_Vector} would be \texttt{2*WordSize}, where \texttt{WordSize} is a defined constant that is the number of bytes in a word. Two further arrays are used to determine the placement of pointers. \texttt{firstp} is analogous to \texttt{firstd}: there is one entry per type. If there is no block, the entry is -1. If the type has no pointers the value is 0, otherwise it is the offset of the first pointer in the block. It is possible to infer the number of descriptors from the position of the first and the size of the block, but this inference is not always possible for pointers so an array \texttt{ptrno} is used to indicate how many pointers there are for each type. If there are no pointers, the value is -1. A value of 0 means that the pointers occupy the rest of the block (and the block has no descriptors). Otherwise the entry contains the number of pointers in the block. As with \texttt{firstd} and \texttt{bsizes}, a failure to initialize \texttt{firstp} and \texttt{ptrno} correctly may result in serious (and mysterious) bugs. A fifth array, \texttt{blknames}, provides string names for all block types. These names are only used for debugging, and an entry should be made in \texttt{blknames} for each new data type. \subsection{Changes to Existing Code} In addition to any functions that may be needed for operating on values of a new data type, there are several functions and operators that apply to all data types and which may, therefore, need to be changed for any new data type. \noindent These are\\[1ex] {\renewcommand{\arraystretch}{0.9}% \begin{xtabular}{>{\hspace{1cm}\texttt\bgroup}l<{\egroup}@{\hspace{1cm}}p{11cm}} *x & size of x (in \textfn{runtime/omisc.r})\\ copy(x) & copy of x (in \textfn{runtime/fmisc.r})\\ image(x) & string image of x (in \textfn{runtime/fmisc.r})\\ type(x) & string name of type of x (in \textfn{runtime/fmisc.r})\\ \end{xtabular} }\\[1ex] There is not a concept of size for all data types. For example, a Boolean value presumably does not have a size, but the size of a vector presumably is the number of elements it contains. The size of a rational number is problematical. Modifications to \texttt{*x} are easy; see Sec. 4.4.4. There must be some provision for copying any value. For structures, such as vectors, physical copies should be made so that they are treated consonantly with other Icon structures. For other data types, the ``copy'' consists of simply returning the value and not making a physically distinct copy. This should be done for data types, such as Boolean, for which there are only descriptors and no associated blocks. Whether or not a copy of a block for a rational value should be made is a more difficult decision and depends on how such values are treated conceptually, at the source-language level. It is, of course, easiest not to make a physical copy. Some image must be provided for every value. This image should contain enough information to distinguish values of different types and, where possible, to provide some useful additional information about the specific value. The amount of detail that it is practical to provide in the image of a value is limited by the fact that the image is a string that must be placed in the allocated string region. The type must be provided for all values and should consist of a simple string name. 
For example, if \texttt{x} is a Boolean value, \texttt{type(x)} should produce \texttt{"boolean"}. The coding for type is trivial; see Sec. D.2.3. There also are several run-time support routines that must be modified for any new type:\\[1ex] {\renewcommand{\arraystretch}{0.9}% \begin{xtabular}{>{\hspace{1cm}\texttt\bgroup}l<{\egroup}@{\hspace{1cm}}p{11cm}} outimage & image for tracing (in \textfn{runtime/rmisc.r)}\\ order & order for sorting (in \textfn{runtime/rcomp.r)}\\ anycmp & comparison for sorting (in \textfn{runtime/rcomp.r)}\\ equiv & equivalence comparison (in \textfn{runtime/rcomp.r)}\\ \end{xtabular} }\\[1ex] The image produced for tracing purposes is similar to that produced by image and must be provided for all data types. However, outimage produces output and is not restricted to constructing a string in allocated storage. It therefore can be more elaborate and informative. There must be some concept of sorting order for every Icon value. There are two aspects to sorting: the relative order of different data types and the ordering among values of the same type. The routine order produces an integer that corresponds to the order of the type. If the order of a type is important with respect to other types, this matter must be given some consideration. For example, a rational number probably belongs among the numeric types, which, in Icon, sort before structure types. On the other hand, it probably is not important whether vectors come before or after lists. The routine \texttt{anycmp} compares two values; if they have the same order, as defined previously, \texttt{anycmp} determines which is the ``smaller.'' For example, Boolean ``false'' might (or might not) come before ``true,'' but some ordering between the two should be provided. On the other hand, order among vectors probably is not important (or well-defined), and they can be lumped with the other structures in \texttt{anycmp}, for which ordering is arbitrary. Sometimes ordering can be quite complicated; a correct ordering of rational numbers is nontrivial. The routine \texttt{equiv} is used in situations, such as table subscripting and case expressions, to determine whether two values are equivalent in the Icon sense. Generally speaking, two structure values are considered to be equivalent if and only if they are identical. This comparison is included in \texttt{equiv} in a general way. For example, \texttt{equiv} need not be modified for vectors. Similarly, for data types that have no corresponding blocks, descriptor comparison suffices; \texttt{equiv} need not be modified for Boolean values either. However, determining the equivalence of numeric values, such as rational numbers, requires some thought. Although not strictly part of the language proper, there are several routines in the Icon Programming Library and the Unicon class libraries that operate on all types. The addition of new type may require that some of these be modified. A good start to locating code that potentially requires modification is to search for expressions of the form \iconline{case \ldots type( \ldots ) \ldots of} \section{Defined Constants and Macros} Defined constants and macros are used heavily in Icon to parameterize its code for different operating systems and computer architectures and to provide simple, high-level constructions for commonly occurring code sequences that otherwise would be complex and obscure. 
These defined constants and macros should be used consistently when making additions to Icon instead of using \textit{ad hoc} constructions. This improves portability, readability, and consistency. Learning the meanings and appropriate use of the existing defined constants and macro definitions requires an investment of time and energy. Once learned, however, coding is faster, simpler, and less prone to error.

\subsection{Defined Constants}

The following defined constants are used frequently in the run-time system. This list is by no means exhaustive; for specialized constants, see existing functions.\\[1ex]
{\renewcommand{\arraystretch}{0.9}%
\begin{xtabular}{>{\hspace{1cm}\texttt\bgroup}l<{\egroup}@{\hspace{1cm}}p{11cm}}
CsetSize & number of words needed for 256 bits\\
LogHuge & one plus the maximum base-10 exponent of a C \texttt{double}\\
LogIntSize & base-2 logarithm of number of bits in a C \texttt{int}\\
MaxCvtLen & length of the longest possible string obtained by conversion\\
MaxLong & largest C \texttt{long}\\
MaxShort & largest C \texttt{short}\\
MaxStrLen & longest possible string\\
MinListSlots & minimum number of slots in a list-element block\\
MinLong & smallest C \texttt{long}\\
MinShort & smallest C \texttt{short}\\
WordSize & number of bytes in a \texttt{word}\\
\end{xtabular}
}\\[1ex]

\subsection{Macros}
\PrimaryIndexBegin{Macro}
The following macros are used frequently in the run-time system. See \textfn{h/rmacros.h} for most of the definitions and the files included by \textfn{h/rt.h} for the others; see existing routines for examples of usage.\\[0.5cm]
{\renewcommand{\arraystretch}{0.9}%
\begin{xtabular}{>{\hspace{1cm}\texttt\bgroup}l<{\egroup}@{\hspace{1cm}}p{11cm}}
Arg(n) & \hspace{0cm}\texttt{n}th argument to function\\
ArgType(n) & d-word of \texttt{n}th argument to function\\
ArgVal(n) & integer value of v-word of \texttt{n}th argument to function\\
BlkLoc(d) & pointer to block from v-word of \texttt{d}\\
BlkSize(cp) & size of block pointed to by \texttt{cp}\\
BlkType(cp) & type code of block pointed to by \texttt{cp}\\
ChkNull(d) & true if \texttt{d} is a null-valued descriptor\\
CsetOff(b) & offset in a word of cset bit \texttt{b}\\
CsetPtr(b, c) & address of the word in cset \texttt{c} containing bit \texttt{b}\\
DeRef(d) & dereference \texttt{d}\\
EqlDesc(d1, d2) & true if \texttt{d1} and \texttt{d2} are identical descriptors\\
GetReal(dp, r) & get real number into \texttt{r} from descriptor pointed to by \texttt{dp}\\
IntVal(d) & integer value of v-word of \texttt{d}\\
Max(i, j) & maximum of \texttt{i} and \texttt{j}\\
Min(i,
j) & minimum of \texttt{i} and \texttt{j}\\
Mkint(i, dp) & make integer from \texttt{i} in descriptor pointed to by \texttt{dp}\\
Offset(d) & offset from d-word of variable descriptor \texttt{d}\\
Pointer(d) & true if v-word of \texttt{d} is a pointer\\
Qual(d) & true if \texttt{d} is a qualifier\\
Setb(b, c) & set bit \texttt{b} in cset \texttt{c}\\
SlotNum(i, j) & slot for hash number \texttt{i} given \texttt{j} total slots\\
StrLen(q) & length of string referenced by \texttt{q}\\
StrLoc(q) & location of string referenced by \texttt{q}\\
Testb(b, c) & true if bit \texttt{b} in cset \texttt{c} is one\\
Tvar(d) & true if \texttt{d} is a trapped variable\\
TvarLoc(d) & pointer to trapped variable from v-word of \texttt{d}\\
Type(d) & type code in d-word of \texttt{d}\\
Var(d) & true if \texttt{d} is a variable descriptor\\
VarLoc(d) & pointer to value descriptor from v-word of \texttt{d}\\
Vsizeof(x) & size of structure \texttt{x} less variable array at end\\
Vwsizeof(x) & size of structure \texttt{x} in words less variable array at end\\
Wsizeof(x) & size of structure \texttt{x} in words\\
\end{xtabular}
}\\[1ex]
\PrimaryIndexEnd{Macro}

\section{Support Routines}

There are many support routines for performing tasks that occur frequently in the Icon run-time system. Most of these routines are in files in \textfn{runtime} that begin with the letter r. The uses of many of these support routines have been illustrated earlier; what follows is a catalog for reference.

\subsection{Comparison}

The following routines in \textfn{runtime/rcomp.r} perform comparisons:

\begin{xtabular}{l@{\hspace{1cm}}p{11cm}}
\texttt{anycmp(dp1, dp2)} & Compare the descriptors pointed to by \texttt{dp1} and \texttt{dp2} as Icon values in sorting order, returning a value greater than 0, 0, or less than 0 depending on whether the descriptor pointed to by \texttt{dp1} is respectively greater than, equal to, or less than the descriptor pointed to by \texttt{dp2}.\\
\texttt{equiv(dp1, dp2)} & Test for equivalence of the descriptors pointed to by \texttt{dp1} and \texttt{dp2}, returning 1 if equivalent and 0 otherwise.\\
\texttt{lexcmp(dp1,dp2)} & Compare the string qualifiers pointed to by \texttt{dp1} and \texttt{dp2}, returning a value greater than 0, 0, or less than 0 depending on whether the string referenced by \texttt{dp1} is respectively greater than, equal to, or less than the string referenced by \texttt{dp2}.\\
\texttt{numcmp(dp1, dp2, dp3)} & Compare the descriptors pointed to by \texttt{dp1} and \texttt{dp2} as numbers, putting the converted value of the number referenced by \texttt{dp2} in the descriptor pointed to by \texttt{dp3} and returning a value greater than 0, 0, or less than 0 depending on whether the number referenced by \texttt{dp1} is respectively greater than, equal to, or less than the number referenced by \texttt{dp2}.\\
\end{xtabular}

\subsection{Type Conversion}

The following routines in \textfn{src/runtime/cnv.r} perform type conversions:

\begin{xtabular}{l@{\hspace{1cm}}p{11cm}}
\texttt{cnv\_cset(s,d)} & Convert the descriptor pointed to by \texttt{s} to a cset and point \texttt{d} to it.
Return 1 if the conversion can be performed (or is not needed) and 0 otherwise.\\
\texttt{cnv\_int(s,d)} & Convert the descriptor pointed to by \texttt{s} to an integer and store the value in the location pointed to by \texttt{d}, returning 1 if the conversion can be performed, but \texttt{0} otherwise.\\
\texttt{ston(dp,union numeric *result)} & Convert the string descriptor pointed to by \texttt{dp} to a numeric value and store the value in the location pointed to by \texttt{result}, returning the type if the conversion can be performed, but \texttt{CvtFail} otherwise.\\
\texttt{cvpos(i1, i2)} & Convert \texttt{i1} to a positive value with respect to the length \texttt{i2}, returning 0 if the conversion is not possible.\\
\texttt{cnv\_real(sp,dp)} & Convert the descriptor pointed to by \texttt{sp} to a real number and store the value in the location pointed to by \texttt{dp}, returning 1 if the conversion can be performed, but \texttt{0} otherwise.\\
\texttt{cnv\_str(sp, dp)} & Convert the descriptor pointed to by \texttt{sp} to a string, returning \texttt{1} if a resulting string was produced, but \texttt{0} otherwise.\\
\texttt{strprc(dp, i)} & Convert the qualifier pointed to by \texttt{dp} to a procedure descriptor if possible, using \texttt{i} as the number of arguments in the case of a string that represents an operator, returning 0 if the conversion cannot be performed.\\
\end{xtabular}

\subsection{Defaults}

The following routines in \textfn{src/runtime/def.r} produce default values for omitted arguments. They are generally called internally by RTL constructs.

\begin{xtabular}{l@{\hspace{1cm}}p{11cm}}
\texttt{def\_cset(sp, df, dp)} & If the descriptor pointed to by \texttt{sp} is null, store the cset pointed at by \texttt{df} at the place pointed to by \texttt{dp} and return 1; otherwise convert the descriptor pointed to by \texttt{sp} and return 1 if successful. Return 0 if the conversion cannot be performed.\\
\texttt{def\_int(sp, df, dp)} & If the descriptor pointed to by \texttt{sp} is null, store \texttt{df} at the location pointed to by \texttt{dp} and return 1. Otherwise convert the descriptor pointed to by \texttt{sp} to an integer, store it at the location pointed to by \texttt{dp}, and return 1. Return 0 if the conversion cannot be performed.\\
\texttt{def\_str(sp, df, dp)} & If the descriptor pointed to by \texttt{sp} is null, replace it by the descriptor pointed to by \texttt{df} and return 1. Otherwise convert the descriptor pointed to by \texttt{sp} to a string and return 1, but return 0 if the conversion cannot be performed.\\
\end{xtabular}

\subsection{Allocation}

The following routines in \textfn{runtime/ralc.r} all return pointers to the objects they allocate:

{\renewcommand{\arraystretch}{1}%
\begin{xtabular}{>{\hspace{1cm}\texttt\bgroup}l<{\egroup}@{\hspace{1cm}}p{11cm}}
alcblk(i,j) & Allocate a block of \texttt{i} bytes with title \texttt{j} in the allocated block region.\\
alccoexp() & Allocate a co-expression block.\\
alccset(i) & Allocate a block for a cset, setting its size to \texttt{i}.\\
alcfile(fp, i, dp) & Allocate a file block,
setting its file pointer to \texttt{fp}, its status to \texttt{i}, and its name to the qualifier pointed to by \texttt{dp}.\\
alclint(i) & Allocate a long-integer block and place \texttt{i} in it.\\
alclist(i) & Allocate a list-header block and set its size field to \texttt{i}.\\
alclstb(i1, i2, i3) & Allocate a list-element block for \texttt{i1} elements, setting its first field to \texttt{i2} and its \texttt{nused} field to \texttt{i3}.\\
alcreal(r) & Allocate a real-number block and place \texttt{r} in it.\\
alcrecd(i,dp) & Allocate a record block with \texttt{i} fields, setting its procedure descriptor to the descriptor pointed to by \texttt{dp}.\\
alcrefresh(ip, i, j) & Allocate a refresh block for a procedure with \texttt{i} arguments, \texttt{j} local identifiers, and entry point \texttt{ip}.\\
alcselem(dp, i) & Allocate a set-element block, setting its \texttt{member} field to the descriptor pointed to by \texttt{dp} and its hash number field to \texttt{i}.\\
alcset() & Allocate a set-header block.\\
alcstr(sbuf,i) & Allocate a string of length \texttt{i}, and copy the string in \texttt{sbuf} into it, provided \texttt{sbuf} is not \texttt{NULL}.\\
alcsubs(i, j, dp) & Allocate a substring trapped-variable block, setting its length field to \texttt{i}, its offset field to \texttt{j}, and its variable descriptor to the descriptor pointed to by \texttt{dp}.\\
alctable(dp) & Allocate a table-header block, setting its default descriptor to the descriptor pointed to by \texttt{dp}.\\
alctelem() & Allocate a table-element block.\\
alctvtbl(dp1, dp2, i) & Allocate a table-element trapped-variable block, setting its link field to the descriptor pointed to by \texttt{dp1}, its entry field to the descriptor pointed to by \texttt{dp2}, and its hash number field to \texttt{i}.\\
blkreq(i) & Request \texttt{i} bytes of free space in the allocated block region.\\
\texttt{strreq(i)} & Request \texttt{i} bytes of space in the allocated string region.\\
\end{xtabular}
}

\subsection{Operations on Structures}

The following routines in \textfn{runtime/rstruct.r} perform operations on structures:

\begin{xtabular}{l@{\hspace{1cm}}p{11cm}}
\texttt{addmem(sp, ep, dp)} & Add the set-element block pointed to by \texttt{ep} to the set pointed to by \texttt{sp} at the place pointed to by \texttt{dp}.\\
\texttt{cplist(dp1, dp2, i, j)} & Copy the sublist from \texttt{i} to \texttt{j} of the list referenced by the descriptor pointed to by \texttt{dp1}, and place the result in the descriptor pointed to by \texttt{dp2}.\\
\texttt{memb(sp, dp, i, ip)} & Set the value pointed to by \texttt{ip} to 1 if the descriptor pointed to by \texttt{dp} is a member of the set pointed to by \texttt{sp}, using \texttt{i} as the hash number, but to 0 otherwise.\\
\end{xtabular}

\subsection{Input and Output}

The following routines in \textfn{runtime/rsys.r} perform input and output operations:

\begin{xtabular}{l@{\hspace{1cm}}p{11cm}}
\texttt{getstr(sbuf, i, fp)} & Read a line of at most \texttt{i} characters from the file specified by \texttt{fp}, putting the result in \texttt{sbuf}, returning the number of characters read, but returning -1 on an end of file.\\
\texttt{putstr(fp, sbuf, i)} & Write \texttt{i} characters from \texttt{sbuf} on the file specified by \texttt{fp}.\\
\end{xtabular}

\subsection{Error Termination}

The RTL construct \texttt{runerr()} can appear outside the C code of a body or an inline block, reporting errors in parameters, or it may appear in a block of C code.
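To illustrate the two positions, here is a sketch only (not code from the distribution): \texttt{half} and its evenness requirement are hypothetical, and error number 101 (``integer expected'') is simply reused from the \texttt{pos} example above; a real function would choose, or add, an error number appropriate to its second check.

\goodbreak
\begin{iconcode}
function\{1\} half(i)\\
\>if !cnv:C\_integer(i) then runerr(101, i)\ \ /* header position: parameter checking */\\
\>abstract \{ return integer \}\\
body \{\\
\>if (i \% 2 != 0)\ \ \ \ \ /* hypothetical extra requirement */\\
\>\>runerr(101, i);\ \ \ \ /* runerr may equally appear inside C code in a body */\\
\>return C\_integer (i / 2);\\
\>\}\\
end
\end{iconcode}
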
Authors of new runtime system code generally use \texttt{runerr()} and may invent new run-time error codes if needed. Very rarely, an internal error may warrant a \texttt{syserr()}. Interestingly, \texttt{runerr()} only works in top-level RTL functions and operators, not in underlying C functions. If an underlying runtime system function has a problem that should result in a runtime error, the C function will have to return an error code of some kind to the top-level RTL function or operator that called it, asking it to produce a runtime error.

\begin{tabular}{l@{\hspace{1cm}}p{11cm}}
\texttt{runerr(i, dp)} & Terminate execution with error message \texttt{i} showing the offending value in the descriptor pointed to by \texttt{dp} (optional). \\
\texttt{syserr(sbuf)} & Terminate execution with system error message \texttt{sbuf}.\\
\end{tabular}

\subsection{Miscellaneous Operations}

The following miscellaneous operations are in \textfn{h/rmacros.h} or \textfn{runtime/rmisc.r}:

\begin{xtabular}{l@{\hspace{1cm}}p{11cm}}
\texttt{deref(dp)} & Dereference the descriptor pointed to by \texttt{dp}.\\
\texttt{hash(dp)} & Return a hash value for the descriptor pointed to by \texttt{dp}.\\
\texttt{outimage(fp, dp, i)} & Write an image for the value of the descriptor pointed to by \texttt{dp} on the file specified by \texttt{fp}, but not calling recursively if \texttt{i} is nonzero.\\
\texttt{qtos(qp, sbuf)} & Convert the string represented by the qualifier pointed to by \texttt{qp} to a null-terminated C-style string in \texttt{sbuf}.\\
\end{xtabular}

\subsection{Diagnostics}

There are two routines in \textfn{runtime/rmemmgt.r} for producing diagnostic output. They are not included in the runtime system unless you build it with \texttt{DeBugIconx} defined in your \textfn{define.h}.

%xtabular produces a really stupid page break
\begin{tabular}{l@{\hspace{1cm}}p{11cm}}
\texttt{descr(dp)} & Write a diagnostic diagram of the descriptor pointed to by \texttt{dp} to standard error output.\\
\texttt{blkdump()} & Write a diagnostic diagram of the allocated block region to standard error output.\\
\end{tabular}

\section{Debugging}

Debugging a modification to Icon can be difficult unless the overall structure of the Icon system is understood. It is especially important to understand the way Icon's data is organized and how storage management works. If an addition to Icon does not work properly, or if Icon itself no longer functions correctly after a modification, it is generally advisable to {\em think} about the possible sources of problems, instead of immediately resorting to a debugger.

Print statements are a crude but often effective means of locating the source of an error. When adding diagnostic output, use

\iconline{fprintf(stderr, " \ldots ",\ \ldots );}

\noindent instead of the corresponding \texttt{printf}. On some systems it may be useful to follow such calls by \texttt{fflush(stderr)} to ensure that the diagnostic output appears as soon as it is produced.

Icon normally traps floating-point exceptions and illegal addressing (segmentation violations), since these errors can result from source-language programming errors, such as division by real zero and excessive recursion resulting in C stack overflow. For example, Icon normally produces \texttt{Error 302 ("C stack overflow")} in case of a segmentation violation. This method of handling such an error is appropriate if Icon itself is free of bugs, but it interferes with debugging in situations where there are likely to be bugs in new code.
Assigning a value to the environment variable \texttt{ICONCORE} turns off the trapping of such errors. In this case most systems produce meaningful diagnostic messages and a core dump in the case of an exception. If \texttt{ICONCORE} has a value, a core dump is produced if an Icon program terminates as a result of \texttt{runerr} or \texttt{syserr}. It is therefore good practice to set \texttt{ICONCORE} when making modifications to Icon. For an extended debugging session, it may be convenient to set \texttt{dodump} in \textfn{runtime/init.r} to 1.

\subsection{Extra help in \texttt{DEVMODE} (Unicon)}

When Unicon is built with \texttt{DEVMODE} enabled there are some extra facilities provided to help with debugging.

\subsubsection{Breakpoints in the Unicon program}

It is difficult (when debugging the runtime system) to arrange for a breakpoint to occur at a particular location in the Unicon program. In \texttt{DEVMODE} there is a new standard function, which is callable from Unicon, named \texttt{dbgbrk}. Writing something like the code fragment below will cause a debugger breakpoint at the desired moment -- provided the debugger is instructed to place a breakpoint at the function called \texttt{Zdbgbrk}.

\begin{iconcode}
\$ifdef \_DEVMODE\\
\>if check\_for\_condition() then \{ dbgbrk() \}\\
\$endif\\
\end{iconcode}

Note that use of \texttt{dbgbrk} should always be protected by \texttt{\$ifdef \_DEVMODE} because the function is only defined when \texttt{DEVMODE} is enabled.

\subsubsection{Identifying the current Unicon line}

The position in the Unicon program can always be established by looking into the runtime system with the debugger. A couple of functions make this process easier. The functions may be invoked directly from \texttt{gdb} or \texttt{lldb}. Both functions are parameterless.

\begin{quote}
\begin{description}
\item[\texttt{dbgUFL}] Print the current Unicon file and line number.
\item[\texttt{dbgUTrace}] Print a Unicon stack trace.
\end{description}
\end{quote}

\subsection{Heap Verification (Unicon)}
\label{HeapVerifier}

Icon may be built with optional extra heap checking code that is enabled by defining \texttt{DebugHeap} in \textfn{h/define.h} or by giving the \texttt{--enable-verifyheap} argument to \texttt{configure}. The extra checking is defined in \textfn{h/rmacros.h} and may be summarized by

\begin{iconcode}
/* Debug Heap macros. These add runtime checks to catch (most)\\
\ * illegal block references resulting from untended pointers.\\
\ * Use during new code development, when gdb and valgrind fail to help.\\
\ */
\end{iconcode}

If the use of these macros fails to pinpoint a suspected heap problem, Unicon has an extra level of checking that is enabled by defining \texttt{VerifyHeap} in \textfn{h/define.h}. Most of the extra verification is performed inside the \texttt{collect} function, just before and just after garbage collection takes place, although some more limited checks are also made during the normal running of the program.

The philosophy of the heap verification code is to ``crash early'', as soon as a problem is detected, in the hope that the sooner the debugger is involved after a problem is found, the easier it will be to establish the cause. In the worst case, this ``crash early'' approach can lead to the Unicon compilation system itself crashing (which makes it a bit harder to fix any problems). To mitigate this, the heap verification code is controlled by an environment variable \texttt{VRFY}.
The variable is bit-significant, and the bits control which verifications are undertaken. A value of zero means that no verifications are made at all. In general, if the bit corresponding to the typecode of a structure is set, that block type will be verified (e.g. if bit \texttt{(1 <{}< T\_Table)} is set then table header blocks will be verified). The bit test is made on each verification so, in principle, the verification flags could be changed during the execution of the program. The verifier contains a circular log buffer where messages placed by the verification code are stored. The buffer may be displayed in the debugger by calling the routine \texttt{vrfyPrintLog}. If \texttt{VRFY} is set to -1, all checks are enabled, which can lead to a lot of logging output. It may be better to set \texttt{VRFY} to -2 in some cases: this suppresses the most common log message, which can overwrite useful evidence in the (limited capacity) circular log. The verification checks are made by making a pass over the current heap in a similar fashion to the location phase of garbage collection (see section \ref{GC-Location-Phase}). Each block between \texttt{blkbase} and \texttt{blkfree} is examined. In the checking that takes place {\em before} garbage collection, it is unknown whether the block being checked is live or whether it will be collected. Thus pre-GC checking must necessarily be more conservative because it cannot be guaranteed that other blocks pointed to by the present one contain valid information -- they may have already been reused. After the collection has been made it is guaranteed that all the blocks in the current heap are live, or potentially so, and therefore a more extensive set of checks may be performed. For example, after GC it is possible to verify the entire structure of a table -- all the buckets point to valid slots; all the slots point to valid table elements; there are no duplicate elements etc.: before GC, none of these checks is safe because the components of the table (and even the table header block itself) may not be valid. The checking enabled by \texttt{DebugHeap} and \texttt{VerifyHeap} are independent: each may be active without the other although, in practice, it is likely they will be enabled together. \section{Adding new types to the compiler} \newcommand{\toklbra}{\texttt{~~(~~}}% \newcommand{\tokrbra}{\texttt{~~)~~}}% \newcommand{\toklcbra}{\texttt{~~\{~~}}% \newcommand{\tokrcbra}{\texttt{~~\}~~}}% \newcommand{\toklsbra}{\texttt{~~[~~}}% \newcommand{\tokrsbra}{\texttt{~~]~~}}% \newcommand{\tokcolon}{\texttt{~~:~~}}% \newcommand{\cceq}{{\normalfont~~::=~~}}% \newenvironment{ebnf}% {\begin{specialcode}{\itshape}}% {\end{specialcode}}% \begin{quote} Editorial Note: This section is derived from Appendix F of the RTL reference document [.ipd261.]. The RTL manual is reproduced here as appendix G. \end{quote} This section describes how to add new types to Icon. It deals with simple types, aggregate types whose values contain other Icon values, and keywords that are variables. These are the kinds of types that are most likely to be added to the language. Executable types such as procedures and co-expressions are beyond the scope of this section, as are types that require special representations and variable types that have special dereferencing semantics. 
\subsection{The implementation of Icon Types (reprise)} As discussed in part 1 and Appendix A of this compendium, an Icon value is implemented as a two-word \textit{descriptor} containing type information and value information. The first word of a descriptor is the {\em d-word}. For the types discussed here, the d-word contains a unique code that identifies the type. The second word of the descriptor is the {\em v-word}; it contains the actual value or a reference to the value. Actual values that are too large to fit in one word are usually put in the block region. This region is controlled by a storage management system that includes a garbage collector. The garbage collector is driven by information in arrays indexed using the type codes associated with the blocks. The block region contains values for both simple and aggregate types. There are several other parts of the run-time system besides the garbage collector that need information about Icon types. Some are Icon operations such as the \texttt{type()} function, while others are automatically invoked features such as error trace back. These are described in more detail below. Types, of course, typically have operations associated with them that create and use values of the type. \subsection{The Type Specification System} Icon types are used in several places in RTL and new types must be added to this language. These uses include type checking constructs, \texttt{return}/\texttt{suspend} statements, and abstract type computations. In addition, the Icon compiler needs information about types in order to perform type inferencing. These requirements are satisfied with a type specification system. This system is a simple declarative language for naming types and describing some of their properties. Information from the type specification system is incorporated in \texttt{rtt} and in \texttt{iconc} when they are built. All types specified by the system may be used in the RTL \texttt{is} and \texttt{type\_case} constructs. They may also be used in abstract type computations. Aggregate types may be used in a \texttt{new} type expression in an abstract type computation. A type specification may optionally indicate that RTL supports a special form of \texttt{return}/\texttt{suspend} statement that constructs a return value, in the form of a full descriptor, from a C value for the v-word of the descriptor. Type specifications are in the file \textfn{common/typespec.txt}. Comments in the file start with \texttt{\#} and continue to the end of the line. This file is translated into a C header file by the program \texttt{typespec}. This is not part of the normal Icon build process; entries at the end of \textfn{common/Makefile} must be uncommented if \textfn{typespec.txt} is updated. A type definition in the specification system has the form: \begin{ebnf} type-def \cceq identifier opt-abrv \tokcolon kind opt-return \end{ebnf} \noindent where \textit{identifier} is the name of the type and \textit{opt-abrv} is an optional abbreviation for the type name. The abbreviation has the form: \begin{ebnf} opt-abrv \cceq nil |\\ \>\>\>\> \toklcbra identifier \tokrcbra \end{ebnf} \noindent The abbreviation is used in tracing type inferencing and other places where a compact notation is desired. If no abbreviation is given, the full type name is used. There are three kinds of types: \texttt{simple}, \texttt{aggregate}, and \texttt{variable}. Their syntax and usage are described in separate sections below. 
\textit{opt-return} indicates optional RTL \texttt{return}/\texttt{suspend} support for the type. The four types of v-words supported by this construct are introduced below as needed. A complete grammar for the specification language is given near the end of this appendix. \subsection{Simple Value Types} Types with a kind clause of \texttt{simple} are simple in the sense that values of the type do not have components that contain other Icon values. These types may otherwise have sophisticated semantics. There are three ways to implement the values of a type: encode them as C integers (these are guaranteed to be at least 32 bits long), implement them as blocks in the block region, or implement them in storage allocated using \texttt{malloc()} (in theory values can also be put in the string region, but it is only well suited for Icon strings; descriptors pointing into this region must have a special form --- described in A.1.1). The choice of implementation determines the type of C value stored in the v-word of the descriptor representing the Icon value. The d-word of a descriptor for one of these types contains a fixed code. It consists of a small integer type code along with flags describing characteristics of the descriptor. The necessary changes to \textfn{rmacros.h} have been covered earlier in section D.3.1. Three of the \textit{opt-return} type specification clauses are useful for implementing value types (the fourth is used for variable types; see below). These clauses add \texttt{return}/\texttt{suspend} statements to RTL of the form \begin{ebnf} \>\texttt{return} \>\>\> type-name \toklbra expr \tokrbra\\ \>\texttt{suspend}\>\>\> type-name \toklbra expr \tokrbra \end{ebnf} \noindent \textit{type-name} is the identifier naming the type. It determines the \texttt{D\_} constant used for the d-word of the operation's result descriptor. \textit{expr} is a C expression whose value is placed in the v-word of the result. The particular \textit{opt-return} clause chosen determines how the C value is stored in the v-word. The clauses are \begin{tabular}{>{\texttt\bgroup}l<{\egroup}% @{\hspace{1cm}}p{11cm}} return C\_integer &% indicates that the value is cast to a C integer; see the definition of word in \textfn{h/typedefs.h} for the exact C type used.\\ return block\_pointer &% indicates that the value is cast to \texttt{(union block *)}; this is usually used for pointers to blocks in the block region.\\ return char\_pointer &% indicates that the value is cast to \texttt{(char *)}. Note, only descriptors of a special form may point into the string region; the storage used with \texttt{return char\_pointer} must reside elsewhere.\\ \end{tabular} As an example, the type specification for the cset type is \begin{iconcode} cset\{c\}:\>\>\> simple\\ \>\>\> return block\_pointer \end{iconcode} \noindent Suppose a variable \texttt{cp} within an Icon operation written in RTL points to a cset block. Then the statement \iconline{return cset(cp);} \noindent constructs a result descriptor for the cset and returns it. For a type with an associated block, a declaration for the block structure must be added to \textfn{h/rstructs.h}. By convention, the structure name is created by prepending \texttt{b\_} to the type name. The first word of a block must contain its \texttt{T\_} type code. If different instances of the block may vary in size, the second word of the block must contain this size in bytes. 
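To make the block layout concrete, the declaration of a hypothetical
fixed-size block for a new type called \texttt{widget} might look roughly as
follows. This is only a sketch: the \texttt{T\_Widget} code and the second
field are invented for illustration, and only the leading type-code word is
actually prescribed.
\begin{iconcode}
struct b\_widget \{\>\>\>\>/* widget block (hypothetical) */\\
\>word title;\>\>\>\>/* must hold the T\_Widget type code */\\
\>word id;\>\>\>\>/* the value itself; illustrative only */\\
\>\};
\end{iconcode}
\noindent Because this block has a fixed size it needs no size word; a block
whose instances vary in size would carry its size in bytes as the second word,
as described above.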
The structure name of the new block must be added to the \texttt{union block} declaration in \textfn{h/rstructs.h}. An allocation routine for the block must be added to \textfn{runtime/ralc.r}. The macros \texttt{AlcFixBlk()} and \texttt{AlcVarBlk()} are useful in such routines; see other allocation routines for guidelines. There are five arrays in \textfn{runtime/rmemmgt.r} that must be updated for all types (the details have been covered in section D.3.3). Storage for the values of a type usually should be allocated in the block region. However, for interfaces to packages written in C, it may be necessary to use storage that is not relocated by garbage collection. While it is possible to place types allocated with \texttt{malloc()} under control of garbage collection, this is complicated and beyond the scope of this appendix. See the implementation of co-expressions for an example of how this can be done. Short of modifying the garbage collector to handle a new type's allocated storage, the best practice for any storage allocated with \texttt{malloc()} is to provide an Icon function, along the lines of \texttt{close()}, that explicitly frees storage associated with a value when the program is finished using it. The necessary changes to built-in functions and support routines have been covered earlier in section D.3.4. At the end of this appendix is a check list of files that must be updated when a type is added to Icon. \subsection{Aggregate Types} Aggregate types have values with components that are other Icon values. The aggregate type specification provides more sophisticated RTL abstract type computations for the type. These in turn allow \texttt{iconc} to produce code that is better optimized. For interpreter-only implementations, abstract type computations are not used and are optional in RTL code; the \texttt{simple} type specification may be used in that case. However, the discussion later in this section on block layout and on the storage management arrays still applies. The \textit{kind} clause of an \texttt{aggregate} type specification establishes and names abstract components for the type. The clause is of the form \begin{ebnf} kind \cceq \texttt{aggregate} \toklbra component, \ldots \tokrbra\\ \\ component \cceq identifier |\\ \>\>\>\>\> \texttt{var} identifier opt-abrv \end{ebnf} \noindent Note, the opt-return clauses discussed in the previous section may be also used with \texttt{aggregate} types. The \texttt{aggregate} specification can be thought of as establishing a sort of ``abstract type record'' whose fields, the abstract components, summarize the type information of the actual components of values in the type. Most types are given one abstract component. For example, the set type has the specification \begin{iconcode} set\{S\}: \>\>\> aggregate(set\_elem)\\ \>\>\> return block\_pointer \end{iconcode} \noindent where \texttt{set\_elem} represents all the elements of a set. Abstract components can be accessed using dot notation, and the \texttt{new} abstract type computation can be used to establish a new subtype of the type (subtypes only exist internally in the compiler and have no existence at the Icon language level). A subtype can be returned by the operation and has its own component types independent of subtypes created elsewhere. The abstract type computation for Icon set intersection, the \texttt{**} operator, uses both dot notation and a \texttt{new} expression. It performs intersection in the abstract type realm. 
\texttt{x} and \texttt{y} are the parameters of the operation and may contain different subtypes of the set type: \iconline{ return new set(store[type(x).set\_elem] ** store[type(y).set\_elem]) } \noindent (Note that the components can be thought of as references to information contained in a \textit{type store} --- thus the indexing notation.) Components that represent Icon variables are preceded by \texttt{var} and may be given abbreviations for use in tracing type inferencing. For example, the list type has the specification \begin{iconcode} list\{L\}: \>\>\> aggregate(var lst\_elem\{LE\})\\ \>\>\> return block\_pointer \end{iconcode} \noindent These components may be returned from operations and represent the component as a variable. For example, the abstract type computation for element generation operator when applied to a list is \iconline{ return type(dx).lst\_elem } \noindent where \texttt{dx} is the parameter of the operation. When a value rather than a variable is returned, the component must be ``dereferenced'' by indexing into the store, as in the abstract type computations of \texttt{get()}: \iconline{ return store[type(x).lst\_elem] } \noindent Non-variable components must always be dereferenced. For types, such as tables, that contain Icon values serving different purposes, it may be effective to establish several abstract components. Aggregate types are implemented using blocks that contain descriptors, and they may be implemented using several kinds of blocks, with some blocks having pointers to others. When there are multiple blocks, there is always a \textit{header} block that uses the \texttt{T\_} code of the type. Other blocks are given internal type codes; these codes must be added to \textfn{h/rmacros.h} and entries must be made in the storage management arrays. Any descriptors in a block must be at the end. The type's entry in the \texttt{firstd} array is the location of the first descriptor. Any block pointers in the block must be contiguous. The type's entry in the \texttt{firstp} array is the location of the first pointer and its entry in the \texttt{ptrno} array is the number of pointers. \subsection{Keyword Variable Types} Keyword variable types have a type specification with a \textit{kind} clause of the form \begin{ebnf} kind \cceq \texttt{variable} var-type-spec\\ \\ var-type-spec \cceq \texttt{initially} type |\\ \>\>\>\>\>\> \texttt{always} type\\ \\ type \cceq type-name |\\ \>\>\> type \texttt{~~++~~} type-name\\ \\ type-name \cceq identifier \end{ebnf} The compiler must be able to infer the types of values stored in a keyword variable. The \texttt{initially} option causes the keyword variable type to be treated as a set of global variables, each initialized to the given type specified by the \textit{type} clause. The \texttt{always} option indicates that the keyword always contains values of the given type and the compiler does no actual inference on it. \textit{type} may be the union of several types; this indicates that the type is uncertain and may be any of the ones specified. A special \textit{type-name}, \texttt{any\_value}, indicates complete uncertainty. \noindent The clause \iconline{ always any-value } \noindent is a correct, although entirely imprecise, description of any keyword variable. This appendix assumes that keyword variables are implemented by global descriptors (though other techniques are possible). 
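As an illustration, a keyword variable type that always holds integers could
be specified along the following lines. This is a hypothetical entry written
against the grammar above; the abbreviation is made up, and the exact wording
of the real entries in \textfn{common/typespec.txt} may differ. It uses the
\texttt{descriptor\_pointer} return clause discussed below.
\begin{iconcode}
kywdint\{ki\}: \>\>\> variable always integer\\
\>\>\> return descriptor\_pointer
\end{iconcode}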
The \textit{opt-return} clause of the form
\iconline{ return descriptor\_pointer }
\noindent is useful for implementing keyword variables. The v-word of a result
descriptor from a corresponding \texttt{return}/\texttt{suspend} expression is
of type \texttt{struct descrip *}.

Some of the same files must be updated for variable types as for value types.
Type codes must be added to \textfn{h/rmacros.h}. The \texttt{D\_} code must
have the \texttt{F\_Var} flag set, for example:
\iconline{ \#define D\_Kywdint (T\_Kywdint | D\_Typecode | F\_Ptr | F\_Var) }
\noindent The storage management tables and the \texttt{outimage()} routine
also must be updated.

Other updates are unique to variable types. The global descriptor must be
established. \textfn{runtime/data.r} contains its declaration.
\texttt{icon\_init()} in \textfn{runtime/init.r} initializes the descriptor,
and \textfn{h/rexterns.h} contains an \texttt{extern} for it. Dereferencing
must be updated; it is performed by \texttt{deref()} in \textfn{runtime/cnv.r}.
Assignment must be updated; it is handled by the macro \texttt{GeneralAsgn()}
in \textfn{runtime/oasgn.r}. The \texttt{name()} function is updated by
changing the support routine \texttt{get\_name()} in \textfn{runtime/rdebug.r}.
The \texttt{variable()} function is updated by changing the support routine
\texttt{getvar()} in \textfn{runtime/rmisc.r}.

The keyword itself goes in \textfn{runtime/keyword.r}. For example,
\texttt{\&random} is of type \texttt{kywdint} and is implemented by the
descriptor \texttt{kywd\_ran}; its definition is
\begin{iconcode}
keyword\{1\} random\\
\>abstract \{\\
\>\>return kywdint\\
\>\}\\
\>inline \{\\
\>\>return kywdint(\&kywd\_ran);\\
\>\}\\
end
\end{iconcode}
\noindent For the interpreter, the keyword name must be added to
\texttt{icont/key\_text.c}. These names are in alphabetical order.
{\color{blue} Unicon generates the keyword names automatically from
\texttt{runtime/keyword.r} using the \texttt{mkkwd} program ---
\texttt{icont/key\_text.c} no longer exists.}

If the descriptor may contain a value under control of garbage collection, the
support routine \texttt{collect()} in \textfn{runtime/rmemmgt.r} must be
updated. \texttt{postqual()} preserves references to the string region; the
macro \texttt{Qual()} is used to check for such references.
\texttt{markblock()} preserves references to blocks; the macro
\texttt{Pointer()} is used to check for such references.
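For a hypothetical keyword descriptor \texttt{kywd\_widget}, the addition to
\texttt{collect()} could look roughly like the sketch below. It simply follows
the description of \texttt{Qual()}/\texttt{postqual()} and
\texttt{Pointer()}/\texttt{markblock()} given above and is not actual source;
the existing entries in \textfn{runtime/rmemmgt.r} show the precise form.
\begin{iconcode}
if (Qual(kywd\_widget))\\
\>postqual(\&kywd\_widget);\>\>/* value is a string: preserve the reference */\\
else if (Pointer(kywd\_widget))\\
\>markblock(\&kywd\_widget);\>\>/* value points to a block: preserve it */
\end{iconcode}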
\clearpage \newpage \subsection{The Complete Grammar for the Type Specification System} \begin{ebnf} type-def \>\>\>\> \cceq \>\> identifier opt-abrv \tokcolon kind opt-return\\ \\ kind \>\>\>\> \cceq \>\> \texttt{simple} |\\ \>\>\>\>\>\> \texttt{aggregate} \toklbra component, \ldots \tokrbra |\\ \>\>\>\>\>\> \texttt{variable} var-type-spec\\ \\ component \>\>\>\> \cceq \>\> identifier |\\ \>\>\>\>\>\> \texttt{var} identifier opt-abrv\\ \\ var-type-spec \>\>\>\>\cceq \>\> \texttt{initially} type |\\ \>\>\>\>\>\> \texttt{always} type\\ \\ type \>\>\>\> \cceq \>\> type-name |\\ \>\>\>\>\>\> type \texttt{~~++~~} type-name\\ \\ type-name \>\>\>\> \cceq \>\> identifier\\ \\ opt-abrv \>\>\>\> \cceq \>\> nil |\\ \>\>\>\>\>\> \toklcbra identifier \tokrcbra\\ \\ opt-return \>\>\>\> \cceq \>\>nil |\\ \>\>\>\>\>\> \texttt{return block\_pointer} |\\ \>\>\>\>\>\> \texttt{return descriptor\_pointer} |\\ \>\>\>\>\>\> \texttt{return char\_pointer} |\\ \>\>\>\>\>\> \texttt{return C\_integer}\\ \end{ebnf} \clearpage \newpage \section% {A check list for adding types% {\hfill\small\textit{(on one page for easy photocopying)}}} % {\renewcommand{\arraystretch}{0.8}% Squeeze the lines together \begin{noIndex} \begin{tabular}{% @{$\square$\hspace{0.5cm}}>{\textfn\bgroup}l<{\egroup}% @{\hspace{0.5cm}--\hspace{0.5cm}}l% } \multicolumn{2}{l}{\bf All Types}\\ \multicolumn{2}{l}{}\\ common/typespec.txt & add type specification\\ common/Makefile & uncomment entries near the end of the file\\ h/rmacros.h & add T\_\textit{Type} macro\\ h/rmacros.h & add D\_\textit{Type} macro\\ runtime/rmemmgt.r & \texttt{bsizes} table\\ runtime/rmemmgt.r & \texttt{firstd} table\\ runtime/rmemmgt.r & \texttt{firstp} table\\ runtime/rmemmgt.r & \texttt{ptrno} table\\ runtime/rmemmgt.r & \texttt{blkname} table\\ runtime/rmisc.r & update \texttt{outimage()}\\ \multicolumn{2}{l}{}\\ \multicolumn{2}{l}{\bf All Value Types\vspace{2ex}}\\ runtime/fmisc.r & update \texttt{copy()}\\ runtime/fmisc.r & update \texttt{type()}\\ runtime/rcomp.r & update \texttt{anycmp()}\\ runtime/rcomp.r & update \texttt{order()}\\ runtime/rcomp.r & update \texttt{equiv()}\\ runtime/rmisc.r & update \texttt{getimage()}\\ \multicolumn{2}{l}{}\\ \multicolumn{2}{l}{\bf Types Implemented In The Block Region\vspace{2ex}}\\ h/rstructs.h & add declaration for the block structure\\ h/rstructs.h & update the \texttt{union block} declaration\\ runtime/ralc.r & add an allocation routine\\ \multicolumn{2}{l}{}\\ \multicolumn{2}{l}{\bf Types With Sizes\vspace{2ex}}\\ runtime/omisc.r & update size operator\\ \multicolumn{2}{l}{}\\ \multicolumn{2}{l}{\bf All Keyword Variable Types\vspace{2ex}}\\ h/rexterns.h & extern for keyword descriptor\\ runtime/cnv.r & update \texttt{deref()}\\ runtime/data.r & declaration for keyword descriptor\\ runtime/init.r & initialize keyword descriptor\\ runtime/keyword.r & add keyword declaration\\ runtime/oasgn.r & update \texttt{GeneralAsgn()} macro\\ runtime/rdebug.r & update \texttt{get\_name()}\\ runtime/rmisc.r & update \texttt{getvar()}\\ \multicolumn{2}{l}{}\\ \multicolumn{2}{l}{\bf Keyword Variables That Must Be Garbage Collected\vspace{2ex}}\\ runtime/rmemmgt.r & update \texttt{collect()}\\ \end{tabular} \end{noIndex} } \bigskip
% Default to the notebook output style % Inherit from the specified cell style. \documentclass[11pt]{article} \usepackage[T1]{fontenc} % Nicer default font (+ math font) than Computer Modern for most use cases \usepackage{mathpazo} % Basic figure setup, for now with no caption control since it's done % automatically by Pandoc (which extracts ![](path) syntax from Markdown). \usepackage{graphicx} % We will generate all images so they have a width \maxwidth. This means % that they will get their normal width if they fit onto the page, but % are scaled down if they would overflow the margins. \makeatletter \def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth \else\Gin@nat@width\fi} \makeatother \let\Oldincludegraphics\includegraphics % Set max figure width to be 80% of text width, for now hardcoded. \renewcommand{\includegraphics}[1]{\Oldincludegraphics[width=.8\maxwidth]{#1}} % Ensure that by default, figures have no caption (until we provide a % proper Figure object with a Caption API and a way to capture that % in the conversion process - todo). \usepackage{caption} \DeclareCaptionLabelFormat{nolabel}{} \captionsetup{labelformat=nolabel} \usepackage{adjustbox} % Used to constrain images to a maximum size \usepackage{xcolor} % Allow colors to be defined \usepackage{enumerate} % Needed for markdown enumerations to work \usepackage{geometry} % Used to adjust the document margins \usepackage{amsmath} % Equations \usepackage{amssymb} % Equations \usepackage{textcomp} % defines textquotesingle % Hack from http://tex.stackexchange.com/a/47451/13684: \AtBeginDocument{% \def\PYZsq{\textquotesingle}% Upright quotes in Pygmentized code } \usepackage{upquote} % Upright quotes for verbatim code \usepackage{eurosym} % defines \euro \usepackage[mathletters]{ucs} % Extended unicode (utf-8) support \usepackage[utf8x]{inputenc} % Allow utf-8 characters in the tex document \usepackage{fancyvrb} % verbatim replacement that allows latex \usepackage{grffile} % extends the file name processing of package graphics % to support a larger range % The hyperref package gives us a pdf with properly built % internal navigation ('pdf bookmarks' for the table of contents, % internal cross-reference links, web links for URLs, etc.) 
\usepackage{hyperref} \usepackage{longtable} % longtable support required by pandoc >1.10 \usepackage{booktabs} % table support for pandoc > 1.12.2 \usepackage[inline]{enumitem} % IRkernel/repr support (it uses the enumerate* environment) \usepackage[normalem]{ulem} % ulem is needed to support strikethroughs (\sout) % normalem makes italics be italics, not underlines % Colors for the hyperref package \definecolor{urlcolor}{rgb}{0,.145,.698} \definecolor{linkcolor}{rgb}{.71,0.21,0.01} \definecolor{citecolor}{rgb}{.12,.54,.11} % ANSI colors \definecolor{ansi-black}{HTML}{3E424D} \definecolor{ansi-black-intense}{HTML}{282C36} \definecolor{ansi-red}{HTML}{E75C58} \definecolor{ansi-red-intense}{HTML}{B22B31} \definecolor{ansi-green}{HTML}{00A250} \definecolor{ansi-green-intense}{HTML}{007427} \definecolor{ansi-yellow}{HTML}{DDB62B} \definecolor{ansi-yellow-intense}{HTML}{B27D12} \definecolor{ansi-blue}{HTML}{208FFB} \definecolor{ansi-blue-intense}{HTML}{0065CA} \definecolor{ansi-magenta}{HTML}{D160C4} \definecolor{ansi-magenta-intense}{HTML}{A03196} \definecolor{ansi-cyan}{HTML}{60C6C8} \definecolor{ansi-cyan-intense}{HTML}{258F8F} \definecolor{ansi-white}{HTML}{C5C1B4} \definecolor{ansi-white-intense}{HTML}{A1A6B2} % commands and environments needed by pandoc snippets % extracted from the output of `pandoc -s` \providecommand{\tightlist}{% \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} \DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}} % Add ',fontsize=\small' for more characters per line \newenvironment{Shaded}{}{} \newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}} \newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.56,0.13,0.00}{{#1}}} \newcommand{\DecValTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}} \newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}} \newcommand{\FloatTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}} \newcommand{\CharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}} \newcommand{\StringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}} \newcommand{\CommentTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textit{{#1}}}} \newcommand{\OtherTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{{#1}}} \newcommand{\AlertTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}} \newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.02,0.16,0.49}{{#1}}} \newcommand{\RegionMarkerTok}[1]{{#1}} \newcommand{\ErrorTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}} \newcommand{\NormalTok}[1]{{#1}} % Additional commands for more recent versions of Pandoc \newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.53,0.00,0.00}{{#1}}} \newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}} \newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}} \newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.73,0.40,0.53}{{#1}}} \newcommand{\ImportTok}[1]{{#1}} \newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.73,0.13,0.13}{\textit{{#1}}}} \newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}} \newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}} \newcommand{\VariableTok}[1]{\textcolor[rgb]{0.10,0.09,0.49}{{#1}}} \newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}} \newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.40,0.40,0.40}{{#1}}} \newcommand{\BuiltInTok}[1]{{#1}} \newcommand{\ExtensionTok}[1]{{#1}} \newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.74,0.48,0.00}{{#1}}} \newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.49,0.56,0.16}{{#1}}} 
\newcommand{\InformationTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}} \newcommand{\WarningTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}} % Define a nice break command that doesn't care if a line doesn't already % exist. \def\br{\hspace*{\fill} \\* } % Math Jax compatability definitions \def\gt{>} \def\lt{<} % Document parameters \title{JHU Practical Machine Learning} % Pygments definitions \makeatletter \def\PY@reset{\let\PY@it=\relax \let\PY@bf=\relax% \let\PY@ul=\relax \let\PY@tc=\relax% \let\PY@bc=\relax \let\PY@ff=\relax} \def\PY@tok#1{\csname PY@tok@#1\endcsname} \def\PY@toks#1+{\ifx\relax#1\empty\else% \PY@tok{#1}\expandafter\PY@toks\fi} \def\PY@do#1{\PY@bc{\PY@tc{\PY@ul{% \PY@it{\PY@bf{\PY@ff{#1}}}}}}} \def\PY#1#2{\PY@reset\PY@toks#1+\relax+\PY@do{#2}} \expandafter\def\csname PY@tok@w\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.73,0.73}{##1}}} \expandafter\def\csname PY@tok@c\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}} \expandafter\def\csname PY@tok@cp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.74,0.48,0.00}{##1}}} \expandafter\def\csname PY@tok@k\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@kp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@kt\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.69,0.00,0.25}{##1}}} \expandafter\def\csname PY@tok@o\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@ow\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}} \expandafter\def\csname PY@tok@nb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@nf\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}} \expandafter\def\csname PY@tok@nc\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}} \expandafter\def\csname PY@tok@nn\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}} \expandafter\def\csname PY@tok@ne\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.82,0.25,0.23}{##1}}} \expandafter\def\csname PY@tok@nv\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}} \expandafter\def\csname PY@tok@no\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.53,0.00,0.00}{##1}}} \expandafter\def\csname PY@tok@nl\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.63,0.63,0.00}{##1}}} \expandafter\def\csname PY@tok@ni\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.60,0.60,0.60}{##1}}} \expandafter\def\csname PY@tok@na\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.49,0.56,0.16}{##1}}} \expandafter\def\csname PY@tok@nt\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@nd\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}} \expandafter\def\csname PY@tok@s\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@sd\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@si\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.53}{##1}}} \expandafter\def\csname PY@tok@se\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.13}{##1}}} \expandafter\def\csname PY@tok@sr\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.53}{##1}}} \expandafter\def\csname PY@tok@ss\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}} \expandafter\def\csname 
PY@tok@sx\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@m\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@gh\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}} \expandafter\def\csname PY@tok@gu\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.50,0.00,0.50}{##1}}} \expandafter\def\csname PY@tok@gd\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.63,0.00,0.00}{##1}}} \expandafter\def\csname PY@tok@gi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.63,0.00}{##1}}} \expandafter\def\csname PY@tok@gr\endcsname{\def\PY@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}} \expandafter\def\csname PY@tok@ge\endcsname{\let\PY@it=\textit} \expandafter\def\csname PY@tok@gs\endcsname{\let\PY@bf=\textbf} \expandafter\def\csname PY@tok@gp\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}} \expandafter\def\csname PY@tok@go\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}} \expandafter\def\csname PY@tok@gt\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.27,0.87}{##1}}} \expandafter\def\csname PY@tok@err\endcsname{\def\PY@bc##1{\setlength{\fboxsep}{0pt}\fcolorbox[rgb]{1.00,0.00,0.00}{1,1,1}{\strut ##1}}} \expandafter\def\csname PY@tok@kc\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@kd\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@kn\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@kr\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@bp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@fm\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}} \expandafter\def\csname PY@tok@vc\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}} \expandafter\def\csname PY@tok@vg\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}} \expandafter\def\csname PY@tok@vi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}} \expandafter\def\csname PY@tok@vm\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}} \expandafter\def\csname PY@tok@sa\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@sb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@sc\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@dl\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@s2\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@sh\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@s1\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@mb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@mf\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@mh\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@mi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@il\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname 
PY@tok@mo\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@ch\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}} \expandafter\def\csname PY@tok@cm\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}} \expandafter\def\csname PY@tok@cpf\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}} \expandafter\def\csname PY@tok@c1\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}} \expandafter\def\csname PY@tok@cs\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}} \def\PYZbs{\char`\\} \def\PYZus{\char`\_} \def\PYZob{\char`\{} \def\PYZcb{\char`\}} \def\PYZca{\char`\^} \def\PYZam{\char`\&} \def\PYZlt{\char`\<} \def\PYZgt{\char`\>} \def\PYZsh{\char`\#} \def\PYZpc{\char`\%} \def\PYZdl{\char`\$} \def\PYZhy{\char`\-} \def\PYZsq{\char`\'} \def\PYZdq{\char`\"} \def\PYZti{\char`\~} % for compatibility with earlier versions \def\PYZat{@} \def\PYZlb{[} \def\PYZrb{]} \makeatother % Exact colors from NB \definecolor{incolor}{rgb}{0.0, 0.0, 0.5} \definecolor{outcolor}{rgb}{0.545, 0.0, 0.0} % Prevent overflowing lines due to hard-to-break entities \sloppy % Setup hyperref package \hypersetup{ breaklinks=true, % so long urls are correctly broken across lines colorlinks=true, urlcolor=urlcolor, linkcolor=linkcolor, citecolor=citecolor, } % Slightly bigger margins than the latex defaults \geometry{verbose,tmargin=1in,bmargin=1in,lmargin=1in,rmargin=1in} \begin{document} \maketitle \section{JHU Practical Machine Learning}\label{jhu-practical-machine-learning} Course URL: https://www.coursera.org/learn/practical-machine-learning/home/welcome \subsubsection{Lesson 1 - What is Prediction?}\label{lesson-1---what-is-prediction} Basic ML Workflow question -\textgreater{} input data -\textgreater{} features -\textgreater{} algorithm -\textgreater{} parameters -\textgreater{} evaluation \#\#\#\#\#\# SPAM Example \begin{Verbatim}[commandchars=\\\{\}] {\color{incolor}In [{\color{incolor}57}]:} \PY{c+c1}{\PYZsh{} 1. 
Load and examine dataset} \PY{c+c1}{\PYZsh{} install.packages(\PYZdq{}kernlab\PYZdq{})} \PY{k+kn}{library}\PY{p}{(}kernlab\PY{p}{)} data\PY{p}{(}spam\PY{p}{)} \PY{k+kp}{paste}\PY{p}{(}\PY{l+s}{\PYZsq{}}\PY{l+s}{Columns: \PYZsq{}}\PY{p}{,} \PY{k+kp}{length}\PY{p}{(}\PY{k+kp}{names}\PY{p}{(}spam\PY{p}{)}\PY{p}{)}\PY{p}{)} \PY{k+kp}{head}\PY{p}{(}spam\PY{p}{)} \end{Verbatim} 'Columns: 58' \begin{tabular}{r|llllllllllllllllllllllllllllllllllllllllllllllllllllllllll} make & address & all & num3d & our & over & remove & internet & order & mail & ⋯ & charSemicolon & charRoundbracket & charSquarebracket & charExclamation & charDollar & charHash & capitalAve & capitalLong & capitalTotal & type\\ \hline 0.00 & 0.64 & 0.64 & 0 & 0.32 & 0.00 & 0.00 & 0.00 & 0.00 & 0.00 & ⋯ & 0.00 & 0.000 & 0 & 0.778 & 0.000 & 0.000 & 3.756 & 61 & 278 & spam \\ 0.21 & 0.28 & 0.50 & 0 & 0.14 & 0.28 & 0.21 & 0.07 & 0.00 & 0.94 & ⋯ & 0.00 & 0.132 & 0 & 0.372 & 0.180 & 0.048 & 5.114 & 101 & 1028 & spam \\ 0.06 & 0.00 & 0.71 & 0 & 1.23 & 0.19 & 0.19 & 0.12 & 0.64 & 0.25 & ⋯ & 0.01 & 0.143 & 0 & 0.276 & 0.184 & 0.010 & 9.821 & 485 & 2259 & spam \\ 0.00 & 0.00 & 0.00 & 0 & 0.63 & 0.00 & 0.31 & 0.63 & 0.31 & 0.63 & ⋯ & 0.00 & 0.137 & 0 & 0.137 & 0.000 & 0.000 & 3.537 & 40 & 191 & spam \\ 0.00 & 0.00 & 0.00 & 0 & 0.63 & 0.00 & 0.31 & 0.63 & 0.31 & 0.63 & ⋯ & 0.00 & 0.135 & 0 & 0.135 & 0.000 & 0.000 & 3.537 & 40 & 191 & spam \\ 0.00 & 0.00 & 0.00 & 0 & 1.85 & 0.00 & 0.00 & 1.85 & 0.00 & 0.00 & ⋯ & 0.00 & 0.223 & 0 & 0.000 & 0.000 & 0.000 & 3.000 & 15 & 54 & spam \\ \end{tabular} \begin{Verbatim}[commandchars=\\\{\}] {\color{incolor}In [{\color{incolor}60}]:} \PY{c+c1}{\PYZsh{} This dataset is an example of what could be used as training data for a classification problem.} \PY{c+c1}{\PYZsh{} Interestingly, all variables except for 1 are features. The remaining one is the labeled classification.} \PY{o}{?}spam \end{Verbatim} \begin{Verbatim}[commandchars=\\\{\}] {\color{incolor}In [{\color{incolor}58}]:} \PY{k+kp}{names}\PY{p}{(}spam\PY{p}{)} \end{Verbatim} \begin{enumerate*} \item 'make' \item 'address' \item 'all' \item 'num3d' \item 'our' \item 'over' \item 'remove' \item 'internet' \item 'order' \item 'mail' \item 'receive' \item 'will' \item 'people' \item 'report' \item 'addresses' \item 'free' \item 'business' \item 'email' \item 'you' \item 'credit' \item 'your' \item 'font' \item 'num000' \item 'money' \item 'hp' \item 'hpl' \item 'george' \item 'num650' \item 'lab' \item 'labs' \item 'telnet' \item 'num857' \item 'data' \item 'num415' \item 'num85' \item 'technology' \item 'num1999' \item 'parts' \item 'pm' \item 'direct' \item 'cs' \item 'meeting' \item 'original' \item 'project' \item 're' \item 'edu' \item 'table' \item 'conference' \item 'charSemicolon' \item 'charRoundbracket' \item 'charSquarebracket' \item 'charExclamation' \item 'charDollar' \item 'charHash' \item 'capitalAve' \item 'capitalLong' \item 'capitalTotal' \item 'type' \end{enumerate*} \begin{Verbatim}[commandchars=\\\{\}] {\color{incolor}In [{\color{incolor}50}]:} str\PY{p}{(}spam\PY{p}{)} \end{Verbatim} \begin{Verbatim}[commandchars=\\\{\}] 'data.frame': 4601 obs. 
of 58 variables: \$ make : num 0 0.21 0.06 0 0 0 0 0 0.15 0.06 {\ldots} \$ address : num 0.64 0.28 0 0 0 0 0 0 0 0.12 {\ldots} \$ all : num 0.64 0.5 0.71 0 0 0 0 0 0.46 0.77 {\ldots} \$ num3d : num 0 0 0 0 0 0 0 0 0 0 {\ldots} \$ our : num 0.32 0.14 1.23 0.63 0.63 1.85 1.92 1.88 0.61 0.19 {\ldots} \$ over : num 0 0.28 0.19 0 0 0 0 0 0 0.32 {\ldots} \$ remove : num 0 0.21 0.19 0.31 0.31 0 0 0 0.3 0.38 {\ldots} \$ internet : num 0 0.07 0.12 0.63 0.63 1.85 0 1.88 0 0 {\ldots} \$ order : num 0 0 0.64 0.31 0.31 0 0 0 0.92 0.06 {\ldots} \$ mail : num 0 0.94 0.25 0.63 0.63 0 0.64 0 0.76 0 {\ldots} \$ receive : num 0 0.21 0.38 0.31 0.31 0 0.96 0 0.76 0 {\ldots} \$ will : num 0.64 0.79 0.45 0.31 0.31 0 1.28 0 0.92 0.64 {\ldots} \$ people : num 0 0.65 0.12 0.31 0.31 0 0 0 0 0.25 {\ldots} \$ report : num 0 0.21 0 0 0 0 0 0 0 0 {\ldots} \$ addresses : num 0 0.14 1.75 0 0 0 0 0 0 0.12 {\ldots} \$ free : num 0.32 0.14 0.06 0.31 0.31 0 0.96 0 0 0 {\ldots} \$ business : num 0 0.07 0.06 0 0 0 0 0 0 0 {\ldots} \$ email : num 1.29 0.28 1.03 0 0 0 0.32 0 0.15 0.12 {\ldots} \$ you : num 1.93 3.47 1.36 3.18 3.18 0 3.85 0 1.23 1.67 {\ldots} \$ credit : num 0 0 0.32 0 0 0 0 0 3.53 0.06 {\ldots} \$ your : num 0.96 1.59 0.51 0.31 0.31 0 0.64 0 2 0.71 {\ldots} \$ font : num 0 0 0 0 0 0 0 0 0 0 {\ldots} \$ num000 : num 0 0.43 1.16 0 0 0 0 0 0 0.19 {\ldots} \$ money : num 0 0.43 0.06 0 0 0 0 0 0.15 0 {\ldots} \$ hp : num 0 0 0 0 0 0 0 0 0 0 {\ldots} \$ hpl : num 0 0 0 0 0 0 0 0 0 0 {\ldots} \$ george : num 0 0 0 0 0 0 0 0 0 0 {\ldots} \$ num650 : num 0 0 0 0 0 0 0 0 0 0 {\ldots} \$ lab : num 0 0 0 0 0 0 0 0 0 0 {\ldots} \$ labs : num 0 0 0 0 0 0 0 0 0 0 {\ldots} \$ telnet : num 0 0 0 0 0 0 0 0 0 0 {\ldots} \$ num857 : num 0 0 0 0 0 0 0 0 0 0 {\ldots} \$ data : num 0 0 0 0 0 0 0 0 0.15 0 {\ldots} \$ num415 : num 0 0 0 0 0 0 0 0 0 0 {\ldots} \$ num85 : num 0 0 0 0 0 0 0 0 0 0 {\ldots} \$ technology : num 0 0 0 0 0 0 0 0 0 0 {\ldots} \$ num1999 : num 0 0.07 0 0 0 0 0 0 0 0 {\ldots} \$ parts : num 0 0 0 0 0 0 0 0 0 0 {\ldots} \$ pm : num 0 0 0 0 0 0 0 0 0 0 {\ldots} \$ direct : num 0 0 0.06 0 0 0 0 0 0 0 {\ldots} \$ cs : num 0 0 0 0 0 0 0 0 0 0 {\ldots} \$ meeting : num 0 0 0 0 0 0 0 0 0 0 {\ldots} \$ original : num 0 0 0.12 0 0 0 0 0 0.3 0 {\ldots} \$ project : num 0 0 0 0 0 0 0 0 0 0.06 {\ldots} \$ re : num 0 0 0.06 0 0 0 0 0 0 0 {\ldots} \$ edu : num 0 0 0.06 0 0 0 0 0 0 0 {\ldots} \$ table : num 0 0 0 0 0 0 0 0 0 0 {\ldots} \$ conference : num 0 0 0 0 0 0 0 0 0 0 {\ldots} \$ charSemicolon : num 0 0 0.01 0 0 0 0 0 0 0.04 {\ldots} \$ charRoundbracket : num 0 0.132 0.143 0.137 0.135 0.223 0.054 0.206 0.271 0.03 {\ldots} \$ charSquarebracket: num 0 0 0 0 0 0 0 0 0 0 {\ldots} \$ charExclamation : num 0.778 0.372 0.276 0.137 0.135 0 0.164 0 0.181 0.244 {\ldots} \$ charDollar : num 0 0.18 0.184 0 0 0 0.054 0 0.203 0.081 {\ldots} \$ charHash : num 0 0.048 0.01 0 0 0 0 0 0.022 0 {\ldots} \$ capitalAve : num 3.76 5.11 9.82 3.54 3.54 {\ldots} \$ capitalLong : num 61 101 485 40 40 15 4 11 445 43 {\ldots} \$ capitalTotal : num 278 1028 2259 191 191 {\ldots} \$ type : Factor w/ 2 levels "nonspam","spam": 2 2 2 2 2 2 2 2 2 2 {\ldots} \end{Verbatim} \begin{Verbatim}[commandchars=\\\{\}] {\color{incolor}In [{\color{incolor}41}]:} \PY{k+kt}{c}\PY{p}{(}\PY{k+kp}{min}\PY{p}{(}spam\PY{o}{\PYZdl{}}your\PY{p}{)}\PY{p}{,} \PY{k+kp}{max}\PY{p}{(}spam\PY{o}{\PYZdl{}}your\PY{p}{)}\PY{p}{)} \end{Verbatim} \begin{enumerate*} \item 0 \item 11.11 \end{enumerate*} \begin{Verbatim}[commandchars=\\\{\}] {\color{incolor}In [{\color{incolor}42}]:} 
density\PY{p}{(}spam\PY{o}{\PYZdl{}}your\PY{p}{[}spam\PY{o}{\PYZdl{}}type\PY{o}{==}\PY{l+s}{\PYZdq{}}\PY{l+s}{nonspam\PYZdq{}}\PY{p}{]}\PY{p}{)} \end{Verbatim} \begin{verbatim} Call: density.default(x = spam$your[spam$type == "nonspam"]) Data: spam$your[spam$type == "nonspam"] (2788 obs.); Bandwidth 'bw' = 0.06322 x y Min. :-0.1897 Min. :0.000000 1st Qu.: 2.5827 1st Qu.:0.000459 Median : 5.3550 Median :0.003009 Mean : 5.3550 Mean :0.089983 3rd Qu.: 8.1273 3rd Qu.:0.030547 Max. :10.8997 Max. :4.061939 \end{verbatim} \begin{Verbatim}[commandchars=\\\{\}] {\color{incolor}In [{\color{incolor}63}]:} \PY{c+c1}{\PYZsh{} Y axis = Density of being spam (blue) or not spam (red)} plot\PY{p}{(}density\PY{p}{(}spam\PY{o}{\PYZdl{}}your\PY{p}{[}spam\PY{o}{\PYZdl{}}type\PY{o}{==}\PY{l+s}{\PYZdq{}}\PY{l+s}{nonspam\PYZdq{}}\PY{p}{]}\PY{p}{)}\PY{p}{,} col\PY{o}{=}\PY{l+s}{\PYZdq{}}\PY{l+s}{blue\PYZdq{}}\PY{p}{,} main\PY{o}{=}\PY{l+s}{\PYZdq{}}\PY{l+s}{\PYZdq{}}\PY{p}{,} xlab\PY{o}{=}\PY{l+s}{\PYZdq{}}\PY{l+s}{Frequency of \PYZsq{}your\PYZsq{}\PYZdq{}}\PY{p}{)} lines\PY{p}{(}density\PY{p}{(}spam\PY{o}{\PYZdl{}}your\PY{p}{[}spam\PY{o}{\PYZdl{}}type\PY{o}{==}\PY{l+s}{\PYZdq{}}\PY{l+s}{spam\PYZdq{}}\PY{p}{]}\PY{p}{)}\PY{p}{,} col\PY{o}{=}\PY{l+s}{\PYZdq{}}\PY{l+s}{red\PYZdq{}}\PY{p}{)} \PY{c+c1}{\PYZsh{} Perhaps above a 0.5 \PYZdq{}uses of \PYZsq{}your\PYZsq{}\PYZdq{}/e\PYZhy{}mail frequency cutoff is good for classifying as spam? Of course though, there\PYZsq{}s no such thing as \PYZdq{}0.5\PYZdq{} uses of a word in an e\PYZhy{}mail? Unless perhaps the dataset means multiple e\PYZhy{}mails from the same person.} abline\PY{p}{(}v\PY{o}{=}\PY{l+m}{0.5}\PY{p}{,} col\PY{o}{=}\PY{l+s}{\PYZdq{}}\PY{l+s}{black\PYZdq{}}\PY{p}{)} \end{Verbatim} \begin{center} \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{output_8_0.png} \end{center} { \hspace*{\fill} \\} \begin{Verbatim}[commandchars=\\\{\}] {\color{incolor}In [{\color{incolor}66}]:} \PY{k+kp}{summary}\PY{p}{(}spam\PY{o}{\PYZdl{}}type\PY{p}{)} \end{Verbatim} \begin{description*} \item[nonspam] 2788 \item[spam] 1813 \end{description*} Evaluating Accuracy of a Binary Classifier In this case, I call it 'binary' because there is only spam/nonspam. Accuracy evaluation can be thought of as something like "the percentage of spam that was classified correctly as spam, plus the same for nonspam". The same number could be reached by starting with 1 and subtracing the misclassified spam and the misclassified non-spam percentages. This is apparently an "optimistic" estimate of the overall error rate, which I will learn about later in the course. 
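In symbols, writing $P(\hat{y}, y)$ for the fraction of all e-mails that are
predicted as $\hat{y}$ and actually labeled $y$ (these are the entries of the
prediction table computed below), this is
\[
\mathrm{accuracy} = P(\mathrm{spam},\mathrm{spam}) +
P(\mathrm{nonspam},\mathrm{nonspam}) = 1 -
\bigl(P(\mathrm{spam},\mathrm{nonspam}) + P(\mathrm{nonspam},\mathrm{spam})\bigr).
\]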
\begin{Verbatim}[commandchars=\\\{\}] {\color{incolor}In [{\color{incolor}86}]:} prediction \PY{o}{\PYZlt{}\PYZhy{}} \PY{k+kp}{ifelse}\PY{p}{(}spam\PY{o}{\PYZdl{}}your \PY{o}{\PYZgt{}} \PY{l+m}{0.5}\PY{p}{,} \PY{l+s}{\PYZsq{}}\PY{l+s}{spam\PYZsq{}}\PY{p}{,} \PY{l+s}{\PYZsq{}}\PY{l+s}{nonspam\PYZsq{}}\PY{p}{)} \PY{k+kp}{paste}\PY{p}{(}\PY{l+s}{\PYZsq{}}\PY{l+s}{Predicted Spam: \PYZsq{}}\PY{p}{,} \PY{k+kp}{length}\PY{p}{(}prediction\PY{p}{[}prediction\PY{o}{==}\PY{l+s}{\PYZsq{}}\PY{l+s}{spam\PYZsq{}}\PY{p}{]}\PY{p}{)}\PY{p}{,} \PY{l+s}{\PYZsq{}}\PY{l+s}{ / \PYZsq{}}\PY{p}{,} \PY{l+s}{\PYZsq{}}\PY{l+s}{Predicted Non\PYZhy{}spam: \PYZsq{}}\PY{p}{,} \PY{k+kp}{length}\PY{p}{(}prediction\PY{p}{[}prediction\PY{o}{==}\PY{l+s}{\PYZsq{}}\PY{l+s}{nonspam\PYZsq{}}\PY{p}{]}\PY{p}{)}\PY{p}{)} prediction\PYZus{}table \PY{o}{\PYZlt{}\PYZhy{}} \PY{k+kp}{table}\PY{p}{(}prediction\PY{p}{,} spam\PY{o}{\PYZdl{}}type\PY{p}{)}\PY{o}{/}\PY{k+kp}{length}\PY{p}{(}spam\PY{o}{\PYZdl{}}type\PY{p}{)} prediction\PYZus{}table \PY{k+kp}{paste}\PY{p}{(}\PY{l+s}{\PYZsq{}}\PY{l+s}{Accuracy: \PYZsq{}}\PY{p}{,} prediction\PYZus{}table\PY{p}{[}\PY{l+s}{\PYZsq{}}\PY{l+s}{spam\PYZsq{}}\PY{p}{,} \PY{l+s}{\PYZsq{}}\PY{l+s}{spam\PYZsq{}}\PY{p}{]} \PY{o}{+} prediction\PYZus{}table\PY{p}{[}\PY{l+s}{\PYZsq{}}\PY{l+s}{nonspam\PYZsq{}}\PY{p}{,} \PY{l+s}{\PYZsq{}}\PY{l+s}{nonspam\PYZsq{}}\PY{p}{]}\PY{p}{)} \end{Verbatim} 'Predicted Spam: 2021 / Predicted Non-spam: 2580' \begin{verbatim} prediction nonspam spam nonspam 0.4590306 0.1017170 spam 0.1469246 0.2923278 \end{verbatim} 'Accuracy: 0.75135840034775' % Add a bibliography block to the postdoc \end{document}
% file: lamprop-manual.tex
% vim:fileencoding=utf-8:fdm=marker:ft=tex:spelllang=en
%
% Copyright © 2018,2020 R.F. Smith <[email protected]>. All rights reserved.
% Created: 2018-05-13 23:48:04 +0200
% Last modified: 2021-05-25T17:15:50+0200
\newcommand{\twodigits}[1]{\ifnum#1<10 0\number#1\else\number#1\fi}
\newcommand{\TheDate}{\number\year-\twodigits\month-\twodigits\day}
\newcommand{\TheTitle}{Lamprop manual}
\newcommand{\trefpi}[1]{\tref{#1} on page~\pageref{#1}}
\documentclass[a4paper,landscape,oneside,11pt,twocolumn]{memoir}
\usepackage{fontspec}
\usepackage{graphicx}
\usepackage{siunitx}
\usepackage{ifthen}
\usepackage{enumitem}
\usepackage{listings}
\usepackage{natbib}
\usepackage{url}
% Set fonts for XeTeX {{{1
\setmainfont{Alegreya}[
  SmallCapsFont={Alegreya SC},
  SmallCapsFeatures={Letters=SmallCaps},
]
\setsansfont{TeX Gyre Heros}
\setmonofont{TeX Gyre Cursor}
\usepackage[italic]{mathastext}
% Hyperref must be loaded last,
\usepackage[bookmarks,pdfborder={0 0 0}]{hyperref}
% with the exception of extensions to it.
\usepackage{memhfixc}
% Size of the page
\setlength{\trimtop}{0pt}
\setlength{\trimedge}{\stockwidth}
\addtolength{\trimedge}{-\paperwidth}
% An A4 sheet in landscape is 297 mm wide and 210 mm high.
% We take 30 mm off the height and 20 mm off the width.
\settypeblocksize{180mm}{277mm}{*}
% According to the log file:
% Text height and width: 514.20023pt by 788pt
% Columnsep and columnseprule: 10pt and 0pt
% That means a column is (788 - 10)/2 = 389pt = 137.2 mm wide.
% A photo at 300 dpi should then be 389/72*300 = 1621 pixels wide.
% But at a width of 1200 pixels, two photos fit in one column.
\setulmargins{*}{*}{1}
\setlrmargins{*}{*}{1}
\setheadfoot{\onelineskip}{1.5\onelineskip}
\setheaderspaces{*}{*}{1}
\checkandfixthelayout
% Settings for the siunitx package.
\sisetup{detect-all=true, mode=text, group-digits=true, input-decimal-markers={.,}, output-decimal-marker={.}, exponent-product=\times, separate-uncertainty=true, load-configurations=abbreviations} % voor enumitem \setlist[itemize,1]{leftmargin=*} \setlist[enumerate,1]{leftmargin=*} \setlist[description,1]{leftmargin=*} \setlist{noitemsep} % Instellingen voor polyglossia %\setmainlanguage[babelshorthands=true]{english} % Opmaak van listings {{{1 \lstset{ language=python, % backgroundcolor=\color{inputbackground}, extendedchars=\true, aboveskip=\smallskipamount, belowskip=\smallskipamount, breaklines=true, % basicstyle=\small \ttfamily, basicstyle=\small, showstringspaces=false, commentstyle=\itshape, stringstyle=\ttfamily, upquote=true, columns=fullflexible, % tighter character kerning, like verb inputencoding=utf8, extendedchars=true, literate= {á}{{\'a}}1 {é}{{\'e}}1 {í}{{\'i}}1 {ó}{{\'o}}1 {ú}{{\'u}}1 {Á}{{\'A}}1 {É}{{\'E}}1 {Í}{{\'I}}1 {Ó}{{\'O}}1 {Ú}{{\'U}}1 {à}{{\`a}}1 {è}{{\`e}}1 {ì}{{\`i}}1 {ò}{{\`o}}1 {ù}{{\`u}}1 {À}{{\`A}}1 {È}{{\'E}}1 {Ì}{{\`I}}1 {Ò}{{\`O}}1 {Ù}{{\`U}}1 {ä}{{\"a}}1 {ë}{{\"e}}1 {ï}{{\"i}}1 {ö}{{\"o}}1 {ü}{{\"u}}1 {Ä}{{\"A}}1 {Ë}{{\"E}}1 {Ï}{{\"I}}1 {Ö}{{\"O}}1 {Ü}{{\"U}}1 {â}{{\^a}}1 {ê}{{\^e}}1 {î}{{\^i}}1 {ô}{{\^o}}1 {û}{{\^u}}1 {Â}{{\^A}}1 {Ê}{{\^E}}1 {Î}{{\^I}}1 {Ô}{{\^O}}1 {Û}{{\^U}}1 {œ}{{\oe}}1 {Œ}{{\OE}}1 {æ}{{\ae}}1 {Æ}{{\AE}}1 {ß}{{\ss}}1 {ű}{{\H{u}}}1 {Ű}{{\H{U}}}1 {ő}{{\H{o}}}1 {Ő}{{\H{O}}}1 {ç}{{\c c}}1 {Ç}{{\c C}}1 {ø}{{\o}}1 {å}{{\r a}}1 {Å}{{\r A}}1 {€}{{\euro}}1 {£}{{\pounds}}1 {«}{{\guillemotleft}}1 {»}{{\guillemotright}}1 {ñ}{{\~n}}1 {Ñ}{{\~N}}1 {¿}{{?`}}1 } \lstdefinestyle{plain}{ basicstyle=\tiny\ttfamily, columns=fixed, language={}, backgroundcolor={}, identifierstyle={} } % Definities van locale modificaties. MOET achter de invoeging van het ifthen % pakket komen! %\ifthenelse{\VCModified>0}% %{\newcommand{\locmod}{\textcolor{red}{~(m)}}}{\newcommand{\locmod}{}} % Document parameters {{{1 \title{\TheTitle} \author{Roland F. Smith} \date{\TheDate} % Kop- en voetteksten \makeevenhead{plain}{\thetitle}{}{} \makeoddhead{plain}{\thetitle}{}{} \makeheadrule{plain}{\textwidth}{\normalrulethickness} \makefootrule{plain}{\textwidth}{\normalrulethickness}{0pt} \makeevenfoot{plain}{\theauthor}{\thepage}{\TheDate} \makeoddfoot{plain}{\theauthor}{\thepage}{\TheDate} \makepagestyle{logboek} \makeevenhead{logboek}{\thetitle}{}{\rightmark} \makeoddhead{logboek}{\thetitle}{}{\rightmark} \makeheadrule{logboek}{\textwidth}{\normalrulethickness} \makefootrule{logboek}{\textwidth}{\normalrulethickness}{0pt} \makeevenfoot{logboek}{\theauthor}{\thepage}{\TheDate} \makeoddfoot{logboek}{\theauthor}{\thepage}{\TheDate} % Ruimte voor nummering in lijsten \cftsetindents{section}{1.5em}{3.0em} \cftsetindents{figure}{1.5em}{3.0em} \cftsetindents{table}{1.5em}{3.0em} \sloppy \makeindex % Document info. {{{1 \special{pdf:docinfo << /Title (\TheTitle) /Author (\theauthor) /Subject (lamprop) /Keywords (lamprop, manual, Roland Smith) /CreationDate (D:20180108164304+0100) >>} %%%%%%%%%%%%%%%%%%%%%%%%% start van het document %%%%%%%%%%%%%%%%%%%%% {{{1 \begin{document} % Specifiek voor MEMOIR % Maak de lijsten minder open. \tightlists % Opmaak voor verklarende tekst bij figuren \hangcaption \captiontitlefont{\small} \chapterstyle{section} % Gebruik twee kolommen in TOC etc. 
\doccoltocetc
% No indentation, but space between paragraphs
%\setlength{\parskip}{\baselineskip}
\nonzeroparskip
\setlength{\parindent}{0pt}
\setbeforesecskip{5pt}
\setaftersecskip{1pt}
\setbeforesubsecskip{5pt}
\setaftersubsecskip{1pt}
\bibliographystyle{plainnat}
\begin{titlingpage}
  \setlength{\parindent}{0pt} % Otherwise the alignment of the \rule's is off.
  \vspace*{\stretch{1}}
  \rule{\linewidth}{1mm}\vspace{5pt}
  \begin{flushright}
    {\Huge \TheTitle}\\[5mm]
    {\huge Roland F. Smith}
  \end{flushright}
  \rule{\linewidth}{1mm}
  \vspace*{\stretch{3}}
  \begin{center}
    \Large Eindhoven \number\year
  \end{center}
\end{titlingpage}
% To make the page numbering match the page numbers in the PDF.
\setcounter{page}{2}
% Use the standard layout.
\pagestyle{logboek}
% Table of contents
\begin{KeepFromToc}
  \tableofcontents
\end{KeepFromToc}
% List of figures.
%\listoffigures
% List of tables
%\listoftables
% Index
%\printindex
\clearpage
% Only chapter and section numbers.
\setcounter{secnumdepth}{1}

%%%%%%%%%%%%%%%%%%%% Introduction %%%%%%%%%%%%%%%%%%%%
\chapter{Introduction} % {{{1
The purpose of this program is to calculate some properties of
fiber-reinforced composite laminates. It calculates:
\begin{itemize}
  \item engineering properties like $E_x$, $E_y$ and $G_{xy}$
  \item thermal properties like $\alpha_x$ and $\alpha_y$
  \item physical properties like density and laminate thickness
  \item stiffness and compliance matrices (\textsc{abd} and abd)
\end{itemize}

Although these properties are not very difficult to calculate (the relevant
equations and formulas can be readily found in the available composite
literature), the calculation is time-consuming and error-prone when done by
hand.

This program can \emph{not} calculate the strength of composite laminates;
because there are many different failure modes, strengths of composite
laminates cannot readily be calculated from the strengths of the separate
materials that form the laminate. These strengths really have to be determined
from tests. However, the author has found \citet{1992WeiEn..52...29H} useful
for initial estimation of the strengths of multi-layer laminates.

The original version of this program was written in C, since implementing it
in a spreadsheet proved cumbersome, inflexible and even produced incorrect
results. The C version ran up to 1.3.x. As an exercise in learning the
language, the author ported the program to the Python programming language.
This proved to be a much cleaner, more maintainable and shorter
implementation. In the meantime, the program was ported from Python version 2
to Python version 3 and the core objects were replaced by functions (now in
\texttt{core.py}). Also the output method was made generic to enable output in
different formats, such as \LaTeX{} and \textsc{html}. Additionally, the
generally hard to obtain transverse fiber properties were replaced with
properties derived from the matrix.

The books from \citet{Hyer:1998} and \citet{Tsai:1992} and the report from
\citet{Nettles:1994} were instrumental in writing the code. More recently, the
books from \citet{Bower:2010} and \citet{Barbero:2008,Barbero:2018} have
proved crucial for the author's understanding of the subject matter and thus
improvements in the code.

All the important code is covered by tests using pytest, and pylama is used to
check the code when it is saved.

\chapter{Building and installing the program} % {{{1
\section{Requirements} % {{{2
The only requirement is Python (version 3.6 or later). Currently the
development is done using Python 3.9.
For developers: You will need pytest\footnote{\url{https://docs.pytest.org/}} to run the provided tests. Code checks are done using pylama\footnote{\url{http://pylama.readthedocs.io/en/latest/}}. Both should be invoked from the root directory of the repository. There are basically two versions of this program; a console version (installed as \texttt{lamprop}) primarily meant for \textsc{posix} operating systems and a \textsc{gui} version (installed as \texttt{lamprop-gui}) primarily meant for ms-windows. You can try both versions without installing them first, with the following invocations in a shell from the root directory of the repository. Use \texttt{python3 console.py -h} for the console version, and \texttt{python3 gui.py} for the \textsc{gui} version. \section{Installation} % {{{2 First, you need to install Python 3. For \textsc{unix}-like operating systems, use the packages or build scripts that your operating system provides. There are Python binaries for ms-windows available from python.org\footnote{\url{https://www.python.org/downloads/}}, and those should work fine for lamprop. Once the requirements are met, you can proceed with the installation of lamprop. \begin{itemize} \item Unpack the tarball or zipfile, or clone the github repository. \item Open a terminal window or (on ms-windows a \texttt{cmd} window). \item Change into the directory where lamprop was unpacked or cloned. \item Run \texttt{python setup.py}. This will tell you where it will try to install the programs. It will also create both self-contained scripts. \item Run \texttt{python setup.py install}. This will install the self-contained scripts in the standard location for your operating system. \end{itemize} The installation locations are: \begin{itemize} \item \verb|$HOME/.local/bin| for \textsc{posix} operating systems. \item The \verb|Scripts| folder of your Python installation on ms-windows \end{itemize} If the \verb|Scripts| folder isn't writable on ms-windows, the installer will try a different location in your documents folder. For Python scripts to work well, the installation location should be named in your \textsc{path} environment variable. \chapter{Using the program} % {{{1 There are basically two ways to use lamprop. \begin{enumerate} \item Use the command-line program \texttt{lamprop}. \item Use the \textsc{gui}-program \texttt{lamprop-gui}. \end{enumerate} They depend on files written in a domain-specific language, which is documented in the next section. \section{The lamprop file format} % {{{2 The file format is very simple. Functional lines have either \texttt{f}, \texttt{r}, \texttt{t}, \texttt{m}, \texttt{l}, \texttt{c} or \texttt{s} as the first non whitespace character. This character must immediately be followed by a colon \texttt{:}. All other lines are seen as comments and disregarded. This program assumes specific metric units. The units used below are important because the program internally calculates the thickness of layers (in mm) based on the volume fractions and densities of the fibers and resins. The \texttt{f:}-line line contains a definition of a fiber. The parser converts this into an instance of a \texttt{Fiber} object The line must contain the following values, separated by white space: \begin{description} \item[$E_1$] Young's modulus in the fiber direction in \si{MPa}. \item[$\nu_{12}$] Poisson's constant (dimensionless). \item[$\alpha_1$] Coefficient of Thermal Expansion in the fiber direction in \si{K^{-1}}. \item[$\rho$] Density of the fiber in \si{g/cm^3}. 
\item[$name$] The identifier for the fiber. This should be unique in all the files read. Contrary to the previous values, this may contain whitespace.
\end{description}
Below is an example for a standard e-glass fiber.\\
\texttt{73000 0.33 5.3e-6 2.60 e-glass}

Usually, $E_1$ and other properties in the fiber length direction are easily obtained from a fiber supplier. Previous versions of this program also required the Young's modulus perpendicular to the fiber to calculate transverse properties of the lamina. Since this value is generally not given in the manufacturer documentation, it has been replaced by a value calculated with the Halpin-Tsai formula from \citet[p. 117]{Barbero:2018} as of version 2020-12-26. Previously, the modulus of the matrix multiplied by a factor was used, according to \citet{Tsai:1992}. However, the author has found that the factor provided by Tsai overestimates $E_2$. In the \texttt{tools} subdirectory of the source distribution you will find a script called \texttt{convert-lamprop.py} to convert old-style fiber lines to the new format.

The \texttt{r:} line contains a definition of a resin. As with the fibers, this becomes an instance of a \texttt{Resin} object in the code. The resin line must contain the following values, separated by white space.
\begin{description}
\item[$E$] Young's modulus in \si{MPa}.
\item[$\nu$] Poisson's constant (dimensionless).
\item[$\alpha$] Coefficient of Thermal Expansion in \si{K^{-1}}.
\item[$\rho$] Density of the resin in \si{g/cm^3}.
\item[$name$] The identifier for the resin. This should be unique in all the files read. Contrary to the previous values, this may contain whitespace.
\end{description}
An example of a generic thermoset resin is shown below.\\
\texttt{3800 0.36 40e-6 1.165 generic}

The \texttt{t:} line starts a new laminate. It only contains the name which identifies the laminate. This name must be unique within the current input files. It may contain spaces.

The \texttt{m:} line chooses a resin for the laminate. It must appear after a \texttt{t:} line, and before the \texttt{l:} lines. It must contain the following values, separated by white space:
\begin{description}
\item[$vf$] The fiber volume fraction. This should be a number between 0 and 1, or a number from 1 up to and including 100. In the latter case it is interpreted as a percentage.
\item[$name$] The name of the resin to use. This must have been previously declared with an \texttt{r:} line.
\end{description}

The \texttt{l:} line defines a single layer (lamina) in the laminate. It must be preceded by a \texttt{t:} and an \texttt{m:} line. It must contain the following values, separated by white space (optional items in brackets):
\begin{description}
\item[$weight$] The area weight in \si{g/m^2} of the dry fibers.
\item[$angle$] The angle upwards from the x-axis under which the fibers are oriented.
\item[($vf$)] Optionally, the layer can have a different fiber volume fraction.
\item[$name$] The name of the fiber used in this layer. This fiber must have been declared previously with an \texttt{f:} line.
\end{description}

The last line in a laminate definition can be an \texttt{s:} line, which stands for “symmetry”. It signifies that all the layers before it are to be added again in reverse order, making a symmetric laminate stack. An \texttt{s:} line in any other position is an error. An example is given below.
\begin{lstlisting}[style=plain] Fiber definition E1 v12 alpha1 rho naam f: 233000 0.2 -0.54e-6 1.76 Hyer's carbon fiber Matrix definition Em v alpha rho name r: 4620 0.36 41.4e-6 1.1 Hyer's resin t: [0/90]s laminate This is a standard symmetric cross-ply laminate. It has fine extensional moduli in the fiber directions, but a very low shear modulus. m: 0.5 Hyer's resin l: 100 0 Hyer's carbon fiber l: 100 90 Hyer's carbon fiber s: \end{lstlisting} There is no artificial limit to the amount of layers that you can use other than Python running out of memory. The author has used laminates with up to 250 layers. Calculating the properties of that laminate took approximately \SI{0.5}{s} on a machine with an Intel Core2 Q9300 running FreeBSD. Interspersed between the \texttt{l:} lines (and before the \texttt{s:} line) there can be \texttt{c:} lines. These are comments about the lay-up, that will be inserted into the output. Their use is to signify that the \texttt{l:} lines following a comment are part of a subassembly such as a woven fabric or a non-crimp fabric. For example: \begin{lstlisting}[style=plain] E1 ν12 α1 ρ name f: 70000 0.33 5e-6 2.54 e-glas Em ν α ρ name r: 3600 0.36 60e-6 1.145 atlac-430 t: atlac430-combi m: 0.22 atlac-430 c: CSM 450 l: 150 0 e-glas l: 150 60 e-glas l: 150 -60 e-glas c: UNIE640 l: 600 0 0.50 e-glas l: 40 90 0.50 e-glas c: -- symmetric -- s: \end{lstlisting} This will yield the output shown in \trefpi{tab:atlac430-combi}. \begin{table}[!htbp] \renewcommand{\arraystretch}{1.2} \caption{\label{tab:atlac430-combi}properties of atlac430-combi} \centering\footnotesize{\rule{0pt}{10pt} \tiny calculated by lamprop 2021.05.25\\[3pt]} \renewcommand{\arraystretch}{1.0} \begin{tabular}[t]{rcrrl} \multicolumn{5}{c}{\small\textbf{Laminate stacking}}\\[0.1em] \toprule %% \usepackage{booktabs} Layer & Weight & Angle & vf & Fiber type\\ & [g/m$^2$] & [$\circ$] & [\%]\\ \midrule \multicolumn{5}{l}{CSM 450}\\ 1 & 150 & 0 & 22 & e-glas\\ 2 & 150 & 60 & 22 & e-glas\\ 3 & 150 & -60 & 22 & e-glas\\ \multicolumn{5}{l}{UNIE640}\\ 4 & 600 & 0 & 50 & e-glas\\ 5 & 40 & 90 & 50 & e-glas\\ \multicolumn{5}{l}{-- symmetric --}\\ \multicolumn{5}{l}{UNIE640}\\ 6 & 40 & 90 & 50 & e-glas\\ 7 & 600 & 0 & 50 & e-glas\\ \multicolumn{5}{l}{CSM 450}\\ 8 & 150 & -60 & 22 & e-glas\\ 9 & 150 & 60 & 22 & e-glas\\ 10 & 150 & 0 & 22 & e-glas\\ \bottomrule \end{tabular}\hspace{0.02\textwidth} \begin{tabular}[t]{rrlrrl} \multicolumn{3}{c}{\small\textbf{Physical properties}}\\[0.1em] \toprule Property & Value & Dimension\\ \midrule $\mathrm{v_f}$ & 32.8 &\%\\ $\mathrm{w_f}$ & 52 &\%\\ thickness & 2.62 & mm\\ density & 1.6 & g/cm$^3$\\ weight & 4195 & g/m$^2$\\ resin & 2015 & g/m$^2$\\ \midrule \multicolumn{6}{c}{\small\textbf{Engineering properties}}\\[0.1em] \multicolumn{3}{c}{\small\textbf{In-plane}} & \multicolumn{3}{c}{\small\textbf{3D stiffness tensor}}\\[0.1em] $\mathrm{E_x}$ & 19638 & MPa & $\mathrm{E_x}$ & 19642 & MPa\\ $\mathrm{E_y}$ & 11413 & MPa & $\mathrm{E_y}$ & 11436 & MPa\\ $\mathrm{E_z}$ & 7504 & MPa & $\mathrm{E_z}$ & 8841 & MPa\\ $\mathrm{G_{xy}}$ & 3770 & MPa & $\mathrm{G_{xy}}$ & 3770 & MPa\\ $\mathrm{G_{xz}}$ & 2592 & MPa & $\mathrm{G_{xz}}$ & 2796 & MPa\\ $\mathrm{G_{yz}}$ & 2681 & MPa & $\mathrm{G_{yz}}$ & 2868 & MPa\\ $\mathrm{\nu_{xy}}$ & 0.3270 &- &$\mathrm{\nu_{xy}}$ & 0.3276 &-\\ $\mathrm{\nu_{yx}}$ & 0.1900 &- & $\mathrm{\nu_{xz}}$ & 0.3634 &-\\ $\mathrm{\alpha_x}$ & 1.466e-05 & K$^{-1}$ &$\mathrm{\nu_{yz}}$ & 0.4103 &-\\ $\mathrm{\alpha_y}$ & 3.934e-05 & K$^{-1}$\\ \bottomrule 
\end{tabular} \end{table} \section{Material data} % {{{2 Over the years, the author has gathered a lot of data for different fibers from data sheets provided by the manufacturers. Data for different fibers is given in \tref{tb:fibers}. In case the $\nu_{12}$ is not known for a carbon fiber, it is estimated at 0.2. Similarly, if the $\alpha_1$ is not known, it is estimated at \SI{-0.12e-6}{K^{-1}}. For glass fibers, $\nu_{12}$ is estimated 0.33 unless known and $\alpha_1$ is estimated \SI{5e-6}{K^{-1}} unless known. \begin{table}[!htbp] \centering \caption{\label{tb:fibers}fibers} \begin{tabular}{lrrrrl}% l,c,r Name & $E_1$ & $\nu_{12}$ & $\alpha_1$ & $\rho$ & Type\\ & [\si{MPa}] & [-] & [\si{K^{-1}}] & [\si{g/cm^3}]\\ \midrule Tenax HTA & 238000 & 0.20 & -0.1e-6 & 1.76 & carbon\\ Tenax HTS & 240000 & 0.20 & -0.1e-6 & 1.77 & carbon\\ Tenax STS40 & 240000 & 0.20 & -0.12e-6 & 1.78 & carbon\\ Toracya T300 & 230000 & 0.27 & -0.41e-6 & 1.76 & carbon\\ Torayca T700SC & 230000 & 0.27 & -0.38e-6 & 1.80 & carbon\\ pyrofil TR30S & 235000 & 0.20 & -0.5e-6 & 1.79 & carbon\\ sigrafil CT24-5.0-270/E100 & 270000 & 0.25 & -0.12e-6 & 1.79 & carbon\\ K63712 & 640000 & 0.234 & -1.47e-6 & 2.12 & carbon\\ K63A12 & 790000 & 0.23 & -1.2e-6 & 2.15 & carbon\\ Torayca T800S & 294000 & 0.27 & -0.60e-6 & 1.76 & carbon\\ K13C2U & 900000 & 0.234 & -1.47e-6 & 2.20 & carbon\\ M35J & 339000 & 0.27 & -0.73e-6 & 1.75 & carbon\\ M46J & 436000 & 0.234 & -0.9e-6 & 1.84 & carbon\\ PX35UD & 242000 & 0.27 & -0.6e-6 & 1.81 & carbon\\ Granoc XN-80-60S & 780000 & 0.27 & -1.5e-6 & 2.17 & carbon\\ Granoc XN-90-60S & 860000 & 0.27 & -1.5e-6 & 2.19 & carbon\\ e-glass & 73000 & 0.33 & 5.3e-6 & 2.60 & glass\\ ecr-glass & 81000 & 0.33 & 5e-6 & 2.62 & glass\\ \end{tabular} \end{table} Several resins are shown in \tref{tb:resins}. For resins, $\nu$ is estimated 0.36 unless known and $\alpha$ is estimated \SI{40e-6}{K^{-1}} unless known. \begin{table}[!htbp] \centering \caption{\label{tb:resins}Resins} \begin{tabular}{lrrrrl}% l,c,r Name & $E$ & $\nu$ & $\alpha$ & $\rho$ & Type\\ & [\si{MPa}] & [-] & [\si{K^{-1}}] & [\si{g/cm^3}]\\ \midrule Epikote EPR04908 & 2900 & 0.25 & 40e-6 & 1.15 & epoxy\\ Palatal P4-01 & 4300 & 0.36 & 40e-6 & 1.19 & polyester\\ Synolite 2155-N-1 & 4000 & 0.36 & 40e-6 & 1.22 & polyester\\ Distitron 3501LS1 & 4100 & 0.36 & 40e-6 & 1.2 & polyester\\ Synolite 1967-G-6 & 3800 & 0.36 & 40e-6 & 1.165 & \textsc{dcpd}\\ atlac 430 & 3600 & 0.36 & 55e-6 & 1.145 & vinylester\\ \end{tabular} \end{table} \section{Using the command-line program} % {{{2 The command \texttt{lamrop -h} produces the following overview of the options. \begin{lstlisting}[style=plain] usage: lamprop [-h] [-l | -H] [-e] [-m] [-f] [-L | -v] [--log {debug,info,warning,error}] [file ...] positional arguments: file one or more files to process optional arguments: -h, --help show this help message and exit -l, --latex generate LaTeX output (the default is plain text) -H, --html generate HTML output -e, --eng output only the layers and engineering properties -m, --mat output only the ABD and abd matrices -f, --fea output only material data for FEA -L, --license print the license -v, --version show program's version number and exit --log {debug,info,warning,error} logging level (defaults to 'warning') \end{lstlisting} Running \texttt{lamprop} from the command line produces text output by default. Output in \LaTeX{} or \textsc{html} format can be requested with the appropriate arguments. 
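As an illustration, two typical invocations are shown below; the file name \texttt{example.lam} is purely hypothetical, and it is assumed that the report is written to standard output so that it can be redirected to a file in the usual way.

\begin{lstlisting}[style=plain]
lamprop example.lam                           # plain text report on the terminal
lamprop -l example.lam > example-report.tex   # LaTeX report saved to a file
\end{lstlisting}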
As of 4.0, \textsc{rtf} output (for inclusion in word processor documents) has been removed. Since most word processors can read \textsc{html}, use that instead. \section{Using the \textsc{gui} program} % {{{2 The \textsc{gui} program was written (using \texttt{tkinter}) primarily for users of ms-windows, since they are generally not used to the command-line interface. The contents of its window are shown in \fref{fig:lamprop-gui}. The image shows the looks of the widgets on \textsc{unix}-like operating systems. On ms-windows it follows the native look. The \textsf{File} button allows you to load a lamprop file. If a file is loaded its name is shown right of the button. The \textsf{Reload} button re-loads a file. If a file is loaded, the \textsf{text} and \textsf{html} buttons allow you to save the output as a file. The checkboxes below determine which results are shown. If a file contains different laminates, the dropbox allows you to select a laminate to display. The textbox at the bottom shows the lamprop output as text. \begin{figure}[!htbp] \centerline{\includegraphics[scale=0.5]{lamprop-gui.png}} \caption{\label{fig:lamprop-gui}lamprop \textsc{gui}} \end{figure} \section{Meaning of the ABD, H and C matrices} % {{{2 The stiffness or ABD matrix are what converts in-plane strains into forces and the other way around, see \tref{tab:quasi-isotropic-mat}. It is a 6×6 matrix that can be divided into three 3×3 matrices; A, B and D. The expansions below reveal the symmetries in this matrix. \[ ABD = \left|\begin{array}{cccccc} A_{11} & A_{12} & A_{16} & B_{11} & B_{12} & B_{16}\\ A_{12} & A_{22} & A_{26} & B_{12} & B_{22} & B_{26}\\ A_{16} & A_{26} & A_{66} & B_{16} & B_{26} & B_{66}\\ B_{11} & B_{12} & B_{16} & D_{11} & D_{12} & D_{16}\\ B_{12} & B_{22} & B_{26} & D_{12} & D_{22} & D_{26}\\ B_{16} & B_{26} & B_{66} & D_{16} & D_{26} & D_{66}\\ \end{array}\right| \] The units of the parts of the ABD matrix are as follows (where $i$ and $j$ are 1, 2 or 6): $A_{ij}$ is in \si{N/mm}. $B_{ij}$ is in \si{Nmm/mm} = \si{N}. $D_{ij}$ is in \si{N.mm}. The stress resultants $N$ are units of force per unit of width (\si{N/mm}). Moment resultants $m$ are in units of torque per unit of width (\si{Nmm/mm} = \si{N}). Both strains $\epsilon$ and $\kappa$ are dimensionless. The ABD and H matrix equations in \tref{tab:quasi-isotropic-mat} basically show the behavior of a square piece of laminate small enough that the stress and strain resultants can be considered constant over its dimensions. If we ignore thermal effects, the most general form of linear elasticity is: \[ \sigma_{ij} = C_{ijkl}\epsilon_{kl} \] The matrix C shown below and in \tref{tab:quasi-isotropic-mat} is a contracted form of the stiffness tensor $C_{ijkl}$, see \citet[p. 78]{Bower:2010}. The indices first indicate the row, then the column. \[ C = \left|\begin{array}{cccccc} c_{11} & c_{12} & c_{13} & c_{14} & c_{15} & c_{16}\\ & c_{22} & c_{23} & c_{24} & c_{25} & c_{26}\\ & & c_{33} & c_{34} & c_{35} & c_{36}\\ & & & c_{44} & c_{45} & c_{46}\\ & \mathrm{sym.} & & & c_{55} & c_{56}\\ & & & & & c_{66}\\ \end{array}\right| \] The matrix indices use Voight notation, see \citet[p. 310]{Barbero:2008}. This contraction is possible because symmetries in the tensor reduce the number of independant material constants from 81 to 21. Note that the indices in the stress and strain vectors are not the same as those used in the C matrix! 
The two indices in the stress and strain vectors indicate respectively the plane whereupon the stress/strain operates and the direction in which said stress/strain points. In this case, 1 is x, 2 is y and 3 is z. \begin{table}[!htbp] \renewcommand{\arraystretch}{1.2} \caption{\label{tab:quasi-isotropic-mat}properties of quasi isotropic laminate} \centering\footnotesize{\rule{0pt}{10pt} \tiny calculated by lamprop 2020.12.26\\[3pt]} \begin{tabular}[t]{rcrrl} \multicolumn{5}{c}{\small\textbf{Laminate stacking}}\\[0.1em] \toprule %% \usepackage{booktabs} Layer & Weight & Angle & vf & Fiber type\\ & [g/m$^2$] & [$\circ$] & [\%]\\ \midrule 1 & 100 & 0 & 50 & Hyer's carbon fiber\\ 2 & 100 & 90 & 50 & Hyer's carbon fiber\\ 3 & 100 & 45 & 50 & Hyer's carbon fiber\\ 4 & 100 & -45 & 50 & Hyer's carbon fiber\\ 5 & 100 & -45 & 50 & Hyer's carbon fiber\\ 6 & 100 & 45 & 50 & Hyer's carbon fiber\\ 7 & 100 & 90 & 50 & Hyer's carbon fiber\\ 8 & 100 & 0 & 50 & Hyer's carbon fiber\\ \bottomrule \end{tabular}\hspace{0.02\textwidth} \begin{tabular}[t]{rrlrrl} \multicolumn{3}{c}{\small\textbf{Physical properties}}\\[0.1em] \toprule Property & Value & Dimension\\ \midrule $\mathrm{v_f}$ & 50 &\%\\ $\mathrm{w_f}$ & 61.5 &\%\\ thickness & 0.909 & mm\\ density & 1.43 & g/cm$^3$\\ weight & 1300 & g/m$^2$\\ resin & 500 & g/m$^2$\\ \bottomrule \end{tabular} \vbox{ \vbox{\small\textbf{In-plane stiffness (ABD) matrix}\\[-3mm] \tiny\[\left\{\begin{array}{c} N_x\\ N_y\\ N_{xy}\\ M_x\\ M_y\\ M_{xy} \end{array}\right\} = \left|\begin{array}{cccccc} 5.02\times 10^{4} & 1.65\times 10^{4} & 0 & 0 & 0 & 0\\ 1.65\times 10^{4} & 5.02\times 10^{4} & 0 & 0 & 0 & 0\\ 0 & 0 & 1.68\times 10^{4} & 0 & 0 & 0\\ 0 & 0 & 0 & 4.99\times 10^{3} & 5.11\times 10^{2} & 1.51\times 10^{2}\\ 0 & 0 & 0 & 5.11\times 10^{2} & 3.18\times 10^{3} & 1.51\times 10^{2}\\ 0 & 0 & 0 & 1.51\times 10^{2} & 1.51\times 10^{2} & 5.29\times 10^{2}\\ \end{array}\right| \times \left\{\begin{array}{c} \epsilon_x\\[2pt] \epsilon_y\\[2pt] \gamma_{xy}\\[2pt] \kappa_x\\[2pt] \kappa_y\\[2pt] \kappa_{xy} \end{array}\right\}\] } \vbox{\small\textbf{Transverse stiffness (H) matrix}\\[-2mm] \tiny\[\left\{\begin{array}{c} V_y\\ V_x \end{array}\right\} = \left|\begin{array}{cc} 4.1429\times 10^{3} & 0\\ 0 & 4.2354\times 10^{3}\\ \end{array}\right| \times \left\{\begin{array}{c} \gamma_{yz}\\[2pt] \gamma_{xz} \end{array}\right\}\] } \vbox{\small\textbf{3D stiffness tensor (C), contracted notation}\\[-3mm] \tiny\[\left\{\begin{array}{c} \sigma_{11}\\ \sigma_{22}\\ \sigma_{33}\\ \sigma_{23}\\ \sigma_{13}\\ \sigma_{12} \end{array}\right\} = \left|\begin{array}{cccccc} 5.90\times 10^{4} & 2.20\times 10^{4} & 8.97\times 10^{3} & 0 & 0 & 0\\ 2.20\times 10^{4} & 5.90\times 10^{4} & 8.97\times 10^{3} & 0 & 0 & 0\\ 8.97\times 10^{3} & 8.97\times 10^{3} & 2.12\times 10^{4} & 0 & 0 & 0\\ 0 & 0 & 0 & 5.52\times 10^{3} & 0 & 0\\ 0 & 0 & 0 & 0 & 5.52\times 10^{3} & 0\\ 0 & 0 & 0 & 0 & 0 & 1.85\times 10^{4}\\ \end{array}\right| \times \left\{\begin{array}{c} \epsilon_{11}\\[2pt] \epsilon_{22}\\[2pt] \epsilon_{33}\\[2pt] 2\cdot\epsilon_{23}\\[2pt] 2\cdot\epsilon_{13}\\[2pt] 2\cdot\epsilon_{12} \end{array}\right\}\] } } \end{table} \chapter{Tips and tricks} % {{{1 The 0\textdegree{} direction is generally in the length of the part or in the direction of the largest load. The following section should be considered a \emph{general guideline}. Sometimes there can be good reason to deviate from it. 
\section{Keep your laminates symmetric and balanced} % {{{2 Looking at the stacking of the layers, it should be symmetric w.r.t. the middle of the stack. So the following laminate is symmetric: \begin{enumerate} \item 0\textdegree \item 45\textdegree \item 90\textdegree \item -45\textdegree \item -45\textdegree \item 90\textdegree \item 45\textdegree \item 0\textdegree \end{enumerate} This is often shortened to “[0/45/90/-45]s”. The area weights of the layers should also be symmetric. A balanced laminate is a laminate where for every layer at an angle on \emph{n}\textdegree{} there is also a layer at \emph{-n}\textdegree. It is often added that for every 0\textdegree{} layer there should also be an equally sized 90\textdegree{} layer, but the author disagrees. For beam-like parts it is often desirable to have the majority of the fibers in the 0\textdegree{} direction. Classical laminate theory strictly speaking is only valid for stackings of unidirectional layers. For woven fabrics and random oriented fiber products approximations are used. \section{Representing woven fabrics} A woven fabric is approximated as a [0\textdegree/90\textdegree]s stack, where the weight of each layer is 1/4 of the total weight of the woven fabric. If warp and weft of the weave are not of equal weight, you should adjust the layers accordingly. Symmetry is important because a lone [0\textdegree/90\textdegree] would exhibit tension/bending coupling that doesn't occur in a woven fabric. If the woven fabric is a small part of a larger stacking, you can use [0\textdegree/90\textdegree] to represent a weave. \section{Representing non-wovens} Things like chopped strand mat (“\textsc{csm}”), continuous filament mat (“\textsc{cfm}”) or other non-wovens can be approximated as a [0\textdegree/60\textdegree/-60\textdegree]s stack with the area weight evenly divided over the directions. Do keep in mind that the fiber volume fraction for such materials is significantly lower than for unidirectional or woven materials. For \textsc{csm} it is unlikely to exceed 25\% and for \textsc{cfm} 10--15\% are typical values. \section{Align your fibers with the expected load} This is a no-brainer for tensile loads, but there is a twist. To counter torsion and shear loads, there should be layers of fibers in the \textpm 45\textdegree{} direction. For bending loads the 0\textdegree{} layers should be at the outside of the part. \section{Laminate strength} As mentioned before, this program cannot predict the strength of laminates from the properties of the fibers and resin used in the layers; it is outside the scope of classical laminate theory. Even stronger, the author does not believe that a general theory of laminate strength based on constituent properties is feasible due to the many different possible failure modes and the factors outside of the fiber and resin properties that influence the laminate. Examples of the latter are the void content, the degree of cure of the resin and errors in cutting or placing the fibers. These are determined by type of production process used and the craftsmanship of the people involved. However, the following guidelines have served the author well over the years. For unidirectional layers loaded in the fiber direction, the strain at which either the fibers or the matrix fail in tension multiplied by the laminate's Young's modulus is the maximum allowed tensile stress. 
The allowed compression stress for such layers is deemed to be 50\%--60\% of the allowed tensile stress The strength of unidirectional layers in the \textpm 45\textdegree{} or 90\textdegree{} directions is estimated as 10\% of the strength in the 0\textdegree{} direction. This is the 10\%-rule according to \citet{1992WeiEn..52...29H}. %%%%%%%%%%%%%%%%%%%% Eindmaterie %%%%%%%%%%%%%%%%%%%% {{{1 \setsecnumdepth{none} %\include{appendices} \bibliography{lman} \chapter{Colofon} This document has been set with the “TeX Live”\footnote{\url{https://www.tug.org/texlive/}} implementation of the \TeX\footnote{\url{http://nl.wikipedia.org/wiki/TeX}} typesetting software, using the \LaTeX\footnote{\url{http://nl.wikipedia.org/wiki/LaTeX}} macros and specifically the \textsc{memoir}\footnote{% \url{http://www.ctan.org/tex-archive/macros/latex/contrib/memoir/}} style. The main font used for the text is Alegreya\footnote{\url{https://github.com/huertatipografica/Alegreya}}. The \textsf{TeX Gyre Heros}\footnote{\url{http://www.gust.org.pl/projects/e-foundry/tex-gyre}} font is used for sans-serif text, while \texttt{TeX Gyre Cursor} is used for program names and program output. \end{document}
{ "alphanum_fraction": 0.6726131483, "avg_line_length": 39.8726673985, "ext": "tex", "hexsha": "d6c66c44d8342f11cbb489b28ccc67729e8ecc50", "lang": "TeX", "max_forks_count": 6, "max_forks_repo_forks_event_max_datetime": "2022-01-24T21:39:55.000Z", "max_forks_repo_forks_event_min_datetime": "2019-02-15T07:57:38.000Z", "max_forks_repo_head_hexsha": "b1f290f38ae55a3d45e54906cae609e2d0c32374", "max_forks_repo_licenses": [ "BSD-2-Clause" ], "max_forks_repo_name": "wr1/lamprop", "max_forks_repo_path": "doc/lamprop-manual.tex", "max_issues_count": 2, "max_issues_repo_head_hexsha": "b1f290f38ae55a3d45e54906cae609e2d0c32374", "max_issues_repo_issues_event_max_datetime": "2022-01-28T11:58:15.000Z", "max_issues_repo_issues_event_min_datetime": "2019-02-15T07:43:55.000Z", "max_issues_repo_licenses": [ "BSD-2-Clause" ], "max_issues_repo_name": "wr1/lamprop", "max_issues_repo_path": "doc/lamprop-manual.tex", "max_line_length": 100, "max_stars_count": 14, "max_stars_repo_head_hexsha": "b1f290f38ae55a3d45e54906cae609e2d0c32374", "max_stars_repo_licenses": [ "BSD-2-Clause" ], "max_stars_repo_name": "wr1/lamprop", "max_stars_repo_path": "doc/lamprop-manual.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-04T08:26:00.000Z", "max_stars_repo_stars_event_min_datetime": "2016-04-17T18:46:11.000Z", "num_tokens": 12077, "size": 36324 }
%% vim: set spell spelllang=en: \documentclass[runningheads]{llncs} \usepackage[latin1]{inputenc} \usepackage{graphicx} \usepackage{xspace} \usepackage{amsmath} \usepackage{amssymb} \usepackage{stmaryrd} \usepackage{ifpdf} \usepackage[all]{xy} \ifpdf % \usepackage{hyperref} \DeclareGraphicsRule{*}{mps}{*}{} \else \fi \pagenumbering{arabic} \newcommand{\scala}{\textsc{Scala}} \newcommand{\pizza}{\textsc{Pizza}} \newcommand{\jmatch}{\textsc{JMatch}} \newcommand{\jbossrules}{\textsc{JBoss Rules}} \newcommand{\jbossel}{\textsc{JBoss-EL}} \newcommand{\jboss}{\textsc{JBoss}} \newcommand{\jsp}{\textsc{JSP}} \newcommand{\jess}{\textsc{Jess}} \newcommand{\jrules}{\textsc{JRules}} \newcommand{\stratego}{\textsc{Stratego}} \newcommand{\jjtraveler}{{JJTraveler}} \newcommand{\maude}{\textsc{Maude}\xspace} \newcommand{\asfsdf}{{ASF+SDF}\xspace} \newcommand{\elan} {\textsf{ELAN}\xspace} \newcommand{\tom}{\textsc{Tom}} \newcommand{\gom}{\textsc{Gom}} \newcommand{\java}{\textsc{Java}} \newcommand{\C}{\textsf{C}} \newcommand{\eclipse}{\textsc{Eclipse}} \newcommand{\ocaml}{\textsc{OCaml}} \newcommand{\ml}{\textsc{ML}} \newcommand{\haskell}{\textsc{Haskell}} \newcommand{\fsharp}{\textsf{F \#}} \newcommand{\lex}[1]{{\textrm{\textbf{#1}}}} \newcommand{\isdef}{\mathrel{\mbox{\small$\stackrel{\mbox{\tiny$\triangle$}}{=}$}}} \def\vs{\vspace{-1.5mm}} \newcommand{\Mu}{{\ensuremath{\mu}}} \newcommand{\ie}{\textit{i.e.}} \newcommand{\wrt}{\textit{wrt.}} \newcommand{\etc}{\textit{etc.}} % code samples \RequirePackage{listings} \lstset{basicstyle={\ttfamily}, keywordstyle={\rmfamily\bfseries}, columns=flexible} \lstdefinelanguage{gom}{ alsoletter={\%}, morekeywords={\%match,module,imports,abstract, syntax,make,make_insert,realMake}, sensitive=true, morecomment=[l]{//}, morecomment=[s]{/*}{*/}, morestring=[b]", } \lstnewenvironment{el}[1][]% {\lstset{frame=tb,#1}} {} \lstnewenvironment{gomcode}[1][]% {\lstset{language={gom},frame=tb,#1}} {} \lstnewenvironment{tomcode}[1][]% {\lstset{language={java}, alsoletter={\%}, morekeywords={\%typeterm,\%op,\%oplist,\%strategy,\%match, is_fsym,get_slot,get_head,get_tail,is_empty,implement,equals, make,make_empty,make_insert,realMake,\%gom,visit, module,imports,abstract,syntax}, frame=tb,#1}} {} \lstnewenvironment{objcode}[1][]% {\lstset{ morekeywords={op,strat}, frame=tb,#1}} {} \lstnewenvironment{asfsdfcode}[1][]% {\lstset{ morekeywords={module,imports,exports,context-free,syntax,equations}, frame=tb,#1}} {} \lstnewenvironment{javacode}[1][]% {\lstset{language={java},frame=tb,#1}} {} \lstnewenvironment{coqcode}[1][]% {\lstset{ morekeywords={Ltac,Inductive,Fixpoint,Theorem,Lemma,match,with,end}, xleftmargin=1em,#1}} {} \lstnewenvironment{elancode}[1][]% {\lstset{ morekeywords={where,end}, frame=tb,#1}} {} \setcounter{secnumdepth}{4} \setcounter{tocdepth}{4} % code samples \newcommand{\comment}[2]{\marginpar{{\small\sf---#1: #2 }}{ \fbox{\sf --#1}}} %\title{Strategic rewriting on existing ASTs} %\title{Analysing and Transforming Third-Party Data-Structures using Data-Abstraction and Strategic Rewriting} \title{Analysing and Transforming Third-Party Data-Structures by combining Data-Abstraction and Strategic Rewriting} %\title{Analysis and Transformation of Third-Party ASTs based on Strategies and ADTs} %\title{A platform for developing tools using strategic programming and ADTs over existing ASTs} \author{Emilie Balland \and Pierre-Etienne Moreau \and Nicolae Vintila} \institute{INRIA \& LORIA,\\ BP 101, 54602 Villers-l{\`e}s-Nancy Cedex France\\ % Domain Specific Consulting Inc. 
% % Canada \email{\{Emilie.Balland,Pierre-Etienne.Moreau\}@loria.fr,[email protected]} } \begin{document} \maketitle \begin{abstract} We present a language embedded in {\java} dedicated to the manipulation and transformation of structured data. Motivated by the promotion of rule based techniques and their integration in large scale applications, the {\tom} language extends {\java} with the purpose of providing high level constructs inspired by the rewriting community. This paper describes in which way {\tom} is data-agnostic and thus can be easily integrated in existing projects. Matching and strategic rewriting features are usable on external data-structures of existing APIs like Eclipse JDT. To illustrate this aspect, we present the {\jbossel} library and show how we can manipulate {\jbossel} expressions using {\tom} for defining transformation and analysis in a declarative way. \end{abstract} % % outline % ------- % what is the general problem: analysing and transforming existing ASTs % an example: JBoss-EL % what do we provide: a way to describe a mapping (and Tom) % we present mi3 and how easy it is to implement Introspector % application of this technology to JBoss-EL % extension: congruence strategies % \section{Introduction} \section{Tom: strategic rewriting piggybacked on Java} The {\tom} language embed strategic rewriting statements in mainstream languages like {\java} or {\C}. For example, the matching statement is introduced by the \lex{\%match} token and can be used as any {\java} code block. This construction is an extension of the conventional construction \texttt{switch/case}, whose main difference is that the discrimination is based on algebraic \emph{terms} rather than atomic values as integers or characters. A basic example of {\tom} programs is the definition of the addition on Peano integers. Integers are represented by algebraic terms based on the \texttt{zero()} constant and the \emph{successor}, \texttt{suc(x)}, which takes a Peano integer as argument. The addition is defined by the following {\java} method: \medskip \begin{tomcode} public int plus(int t1, int t2) { %match(t1, t2) { x,zero() -> { return `x; } x,suc(y) -> { return `suc(plus(x,y)); } } } public void run() { System.out.println("plus(1,2) = " + plus(1,2)); } \end{tomcode} In this example, given two terms $t_1$ and $t_2$ representing Peano integers, the evaluation of the \texttt{plus} function is implemented by matching: $x$ matches $t_1$ and the $zero()$ and $suc(y)$ patterns match eventually $t_2$. When the $zero()$ pattern matches $t_2$, the result is $x$, which is intanciated by $t_1$. When the $suc(y)$ pattern matches $t_2$, it means that the $t_2$ term is rooted by the $suc$ symbol; the subterm $y$ is added to $x$, and the successor of this term is returned. The backquote construction \texttt{`} enables the construction of new algebraic terms and can reuse variables instantiated by matching. Note that the matching statement proposed by {\tom} is more expressive than ones proposed by functional languages. \comment{Nick}{I believe it would be great to have a dedicated section on comparing Tom's capabilities with other languages. I would include Stratego and Kiama, a new Scala based language that assimilated some Stratego power into Scala. Google for "stratego dynamic rules context" to see this powerful feature in Stratego: maybe this can spark new ideas.} For example, it is possible to use equational matching or negative matching. 
Moreover, the right hand side of the match statement are not restricted to terms but can be any instruction from the host language. As for the \texttt{switch/case} statement, if the match succeeds, the instructions are executed and if the control flow of the program is not interrupted, the following patterns are tried. The \texttt{plus} method uses the {\java} \texttt{return} statement in order to interrupt the control flow when a pattern succeeds. Even if this \texttt{plus} method is defined by matching, it can be used as any other {\java} method in the surrounding {\java} code. When using rewriting as a programming or modeling paradigm, it is common to consider rewrite systems that are non-confluent or non-terminating. To be able to use them, it is necessary to exercise some control over the application of the rules. In {\tom}, a solution would be to use {\java} to express the control needed. While this solution provides a huge flexibility, its lack of abstraction renders difficult the reasoning about such transformations. Strategies such as \emph{bottom-up}, \emph{top-down} or \emph{leftmost-innermost} are higher-order features that describe how rewrite rules should be applied. We have developed a flexible and expressive strategy language inspired by {\elan}, {\stratego}, and {\jjtraveler}~\cite{visser-oopsla01} where high-level strategies are defined by combining low-level primitives. For example, the \emph{top-down} strategy is recursively defined by \texttt{TopDown(s)}~${\isdef}$~\texttt{Sequence(s,All(TopDown(s)))}. An elementary strategy corresponds to a minimal transformation. It could be \emph{Identity} (does nothing), \emph{Fail} (always fails), or a set of \emph{rewrite rules} (performs an elementary rewrite step only at the root position). In our system, strategies are type-preserving and have a default behavior (introduced by the keyword \texttt{extends}) that can be either \texttt{Identity} or \texttt{Fail}: \begin{tomcode} %strategy R() extends Fail() { visit Nat { zero() -> { return `suc(zero()); } suc(suc(x)) -> { return `x; } } } \end{tomcode} When a strategy is applied to a term~$t$, as in a \texttt{\%match}, a rule is fired if a pattern matches. Otherwise, the default strategy is applied. For example, applying the strategy \texttt{R()} to the term \texttt{suc(suc(zero()))} will produce the term \texttt{zero()} thanks to the second rule. The application to \texttt{suc(suc(suc(zero())))} fails since no pattern matches at root position. More control is obtained by combining elementary strategies with \emph{basic combinators} such as \texttt{Sequence}, \texttt{Choice}, \texttt{All}, \texttt{One} as presented in~\cite{BKK98,visser-icfp98}. 
By denoting \texttt{s[t]} the application of the strategy~\texttt{s} to the term~\texttt{t}, the \emph{basic combinators} are defined as follows: \begin{small} \begin{tabular}{lll} \texttt{Sequence(s1,s2)[t]} & $\rightarrow$ & \texttt{s2[t']} if \texttt{s1[t]} $\rightarrow$ \texttt{t'}\\ && failure if \texttt{s1[t]} fails\\ \texttt{Choice(s1,s2)[t]} & $\rightarrow$ & \texttt{t'} if \texttt{s1[t]} $\rightarrow$ \texttt{t'}\\ && \texttt{s2[t']} if \texttt{s1[t]} fails\\ \texttt{All(s)[f(t1,\ldots,tn)]} & $\rightarrow$ & \texttt{f(t1',\ldots,tn')} if \texttt{s[t1]} $\rightarrow$ \texttt{t1'},\ldots, \texttt{s[tn]} $\rightarrow$ \texttt{tn'}\\ && failure if there exists \texttt{i} such that \texttt{s[ti]} fails\\ \texttt{One(s)[f(t1,\ldots,tn)]} & $\rightarrow$ & \texttt{f(t1,\ldots,ti',\ldots,tn)} if \texttt{s[ti]} $\rightarrow$ \texttt{ti'}\\ && failure if for all \texttt{i}, \texttt{s[ti]} fails\\ \end{tabular} \end{small} An example of composed strategy is \texttt{Try(s)}~${\isdef}$~\texttt{`Choice(s,Identity())}, which applies~\texttt{s} if it can, and performs the \textit{Identity} otherwise. To define strategies such as \emph{repeat}, \emph{bottom-up}, \emph{top-down}, {\etc} recursive definitions are needed. For example, to repeat the application of a strategy~\texttt{s} until it fails, we consider the strategy \texttt{Repeat(s)}~${\isdef}$~\texttt{Choice(Sequence(s,Repeat(s)),} \texttt{Identity())}. In {\tom}, we use the recursion operator~$\Mu$ (comparable to \texttt{rec} in {\ocaml}) to have stand-alone definitions: $\Mu$\texttt{x.Choice(Sequence(s,x),Identity())}. The \texttt{All} and \texttt{One} combinators are used to define tree traversals. For example, we have \texttt{TopDown(s)}~${\isdef}$~$\Mu$\texttt{x.Sequence(s,All(x))}: the strategy \texttt{s} is first applied on top of the considered term, then the strategy \texttt{TopDown(s)} is recursively called on all immediate subterms of the term. Strategy expressions can have any kind of parameters. It is useful to have a {\java} \texttt{Collection} as parameter in order to collect information. For example, let us consider the following strategy which collects the direct subterms of an~$f$. This program creates a hash-set, and a strategy applied to \texttt{f(f(a()))} collects all the subterms which are under an~\texttt{f}: {\ie} $\{\texttt{a()}, \texttt{f(a())}\}$. \begin{tomcode} %strategy Collect(c:Collection) extends Identity() { visit T { f(x) -> { c.add(`x); } } } Collection bag = new HashSet(); `TopDown(Collect(bag)).apply( `f(f(a())) ); \end{tomcode} \section{Mapping existing ASTs} In this section, we will present how the {\tom} language can be directly used on any Java objects using a mechanism of mappings. In particular, we will show that it can be used to manipulate in a direct way ASTs like Eclipse's JDT. \comment{Nick}{for Eclipse it seems more complicated because of the special APIs Eclipse has for refactoring. I can send some links if you'd like.} \subsection{Mapping data-structures} In the previous example, we can notice that the {\java} \texttt{plus} method take two parameters of type \texttt{int} while the match statement is specified on the {\tom} type \texttt{Nat} composed of the \texttt{zero} and \texttt{suc} constructors. This shows an important specificity of the {\tom} language : the matching compilation is implemented independently of the concrete implementation of the terms. 
In fact, the match constructs are expressed in function of algebraic data types and the compiler translates these statements by manipulation on the concrete data types that represent the algebraic terms. {\tom} users have to specify the relation (\emph{mapping}) between the {\tom} algebraic data types and the concrete {\java} types using dedicated {\tom} syntactic constructs. The \lex{\%typeterm}, \lex{\%op} and \lex{\%oplist} enable to respectively describe the implementation of algebraic types, constructors and list constructors. Now, we will illustrate these constructions by defining the mapping for Peano integers and the {\java} primitive type \texttt{int}. First, the \lex{\%typeterm} construction specifies an algebraic {\tom} type and its concrete {\java} type. The user has also to declare how testing the equality between two terms using the concrete representations. \medskip \begin{tomcode}[morekeywords={equals}] %typeterm Nat { implement { int } equals(t1,t2) { t1 == t2 } } \end{tomcode} The \texttt{nat} type is declared by the \lex{\%typeterm} construction and is implemented by the \texttt{int} type. The mapping also specifies that the equality tests of two \texttt{Nat} terms can be achieved simply by comparison of their concrete representations (using \texttt{==}). Operators of this kind are defined using the construction \lex{\%op} which allows to specify both how \emph{building} and \emph{destroying} (in the sense of decomposing) a term whose head is the symbol of the declared operator. We can define the \texttt{zero} and \texttt{suc} constructors as follows: \begin{tomcode} %op Nat zero() { is_fsym(i) { i==0 } make() { 0 } } %op Nat suc(p:Nat) { is_fsym(i) { i>0 } get_slot(p,i) { i-1 } make(i) { i+1 } } \end{tomcode} The first line of each \lex{\%op} construction defines the signature of the algebraic operator, and the names of its arguments. The \lex{is\_fsym} construction used to test whether an object represents a term whose head symbol is the operator and the \lex{get\_slot} construction can extract the various sub-terms according to their behalf. Both buildings are the destructive part of the mapping used to compile the match statement. The \texttt{make} construction is used to specify how to construct a term whose head symbol is this operator and whose subterms are given as parameters. Variadic operators (unfixed arity) are defined similarly using the \lex{\%oplist} construction. The first line specifies the domain and co-domain of the operator, as its name suggests. Specific operations to lists must be defined, they are used to compile the filtering list. Mappings specifies how to construct an empty list, and insert a new element at the top of a list. It also details how deconstructing a list, by separating the head element and the rest of the list. \begin{tomcode}[morekeywords={equals}] %typeterm NatList { implement { MyIntList } equals(l1,l2) { l1.equals(l2) } } %oplist NatList conc( Nat* ) { is_fsym(t) { t instanceof MyIntList } make_empty() { new MyIntList() } make_insert(e,l) { new MyIntList(e,l) } get_head(l) { l.get(0) } get_tail(l) { l.sublist(1,l.size()) } is_empty(l) { l.isEmpty() } } \end{tomcode} The definition \lex{\%oplist} for the list of \texttt{Nat} elements is implemented by the class \texttt{MyIntList} which extends \texttt{ArrayList<integer>}. It offers functions to construct an empty list and a list from an element representing his head and a list corresponding to the tail. \medskip A mapping should not necessarily be complete. 
It is sometimes useful to specify only the destructive part. For example, the {\tom} runtime library provides mappings for partial standard implementations of the {\java} collection library (\texttt{java.util} package) which can be used to match any {\java} collection in a declarative way. For example, we define the following type mapping for the {\java} \verb+List+ interface: \begin{tomcode} %typeterm List { implement { java.util.List } is_sort(t) { $t instanceof java.util.List } equals(l1,l2) { $l1.equals($l2) } } \end{tomcode} Thus we can define this partial list constructor mapping to match any objects of type \verb+List+: \begin{tomcode} %oplist List conc( Object* ) { is_fsym(t) { t instanceof java.util.List } get_head(l) { l.get(0) } get_tail(l) { l.sublist(1,l.size()) } is_empty(l) { l.isEmpty() } } \end{tomcode} \emph{Add a simple example with non-linearity on lists} \comment{Nick}{As you know, I did not understand well where each of these ADT APIs are used in the Tom generated code.} \comment{Nick}{ For example when is a deep clone needed and when not, is maximal sharing or immutability a strong requirement? Should I make a new term when a child is set or can I just call a setter in the native AST? It would be great to have a section about this in detail!!} \subsection{Strategic rewriting for existing data-structures} In term rewriting, a strategy is a way to control the application of a set of rules on a term. The strategy specifies which rules are applied and at which positions. We argue that strategy languages are also well-adapted for collecting information inside a complex tree structure. Thus the combination of pattern-matching and strategies can greatly facilitate the development of code analysers. {\tom} offers a strategy language smoothly integrated into {\java}, flexible enough to express complex traversals and to collect information using {\java} collections. This language is influenced by the strategy languages offered by {\elan} and {\stratego}. One of its main characteristics is its data-structure independence. Any {\java} data-structure can be traversed thanks to {\tom} mappings presented in previous examples. \comment{Emilie}{Present introspectors} \section{{\jbossel} analysis} {\jbossel} is an extension of the Unified Expression Language (EL) proposed by \texttt{Sun} for manipulating datas from Java Server Pages ({\jsp}). Expressions can call Java methods and evaluate simple arithmetic and boolean expressions. As these expressions are simply strings, {\jboss} offers a parser in order to manipulate them as abstract syntax trees. We will present in this Section how we can directly map the AST given by {\jboss} and describe simple transformation and analysis in a declarative way. \subsection{Brief description of {\jbossel}} In a {\jsp} page, expressions are enclosed by \texttt{\$\{\}}. When rendering the page, expressions are dynamically evaluated. For example: \begin{el} <c:if test="${car.price > 1000}" > ... </c:if> \end{el} In this code, we assume that an instance of the \verb+Car+ {\java} Bean lives in some scope such as session scope or application scope. The instance is bound to the name \texttt{car} and is accessible from a request attribute (equivalent to the {\jsp} code \verb+request.getAttribute("car")+). The \verb+${car.price > 1000}+ expression is evaluated dynamically and the corresponding result (which can be \verb+true+ or \verb+false+ strings) takes the place of the expression. {\jbossel} provides an extension to the Unified Expression Language (EL). 
For example, it allows programmers to use a method with user-defined parameters (as in \verb+${car.setPrice(price)}+) and also offers a mechanism for projection (the result of \verb+#{cars.{c|c.price()}}+ is the list of prices of the \verb+cars+ elements). This extended language is used in {\jboss} products such as {\jboss} Seam and will be used to illustrate the mechanism of {\tom} mappings. In fact, {\jboss} provides a parsing library for constructing the AST corresponding to an expression. Our goal is to perform security analyses directly on these ASTs. The main interest of our approach is that we can directly use the {\tom} language on existing structures. In this way, analyses or transformations can be expressed in a declarative way, without hand-coding complex visitors or translating the structures in order to use other rule-based languages.

\subsection{Mapping {\jbossel} trees}

\subsection{{\jbossel} transformation and analysis}

\subsubsection{Optimizing by rule-based transformation}

\subsubsection{Eliminate hardcoded strings in UI code}

As an example, suppose we want to replace the use of text literals with calls to \verb+bundle.get("...")+ in order to facilitate internationalization. For example, suppose we have the following code:
\begin{el}
${cars.getRowCount()} Cars' total price = $ ${cars.getTotalPrice()}
\end{el}
It will be replaced by:
\begin{el}
${cars.getRowCount()} ${bundle.get("Cars' total price")} = $ ${cars.getTotalPrice()}
\end{el}
After this transformation, the developer can finish the work manually by properly placing the text in resource bundles, which can then be internationalized. In {\tom}, such a transformation is implemented by identifying literals using pattern matching and replacing them with a function call on \texttt{bundle}. The following {\tom}+{\java} code implements this transformation completely:
\begin{tomcode}
%strategy ReplaceLiteral() extends Identity() {
  visit Expr {
    Literal(s) -> {
      return `FunctionCall(Identifier("bundle"),"get",Literal(s));
    }
  }
}

public String internationalize(String expression) {
  Node ast = ELParser.parse(expression);
  Node new_ast = `BottomUp(ReplaceLiteral()).visit(ast);
  return new_ast.getExpression();
}
\end{tomcode}
First we define the transformation rule using the \verb+%strategy+ construct. This basic transformation is identified by the name \texttt{ReplaceLiteral} and can now be applied as a basic strategy. As we want to apply this improvement to the whole AST, the {\java} method named \texttt{internationalize} applies this rule in a bottom-up way (using the {\tom} strategy language) to the AST of its expression argument. The result of the method is a new expression of type \texttt{String}. Note that if we replace the \verb+BottomUp+ strategy by a \verb+TopDown+, the program does not terminate, because the right-hand side of the rule contains \verb+Literal(s)+, which means that the transformation can be fired again.

\subsubsection{List beans read and/or written in a screen}
\comment{Nick}{Detecting writes would require also processing the (X)HTML around the EL to detect EL inside forms.
Another aspect of this is detecting if the display of the page causes state mutations.}

\subsubsection{List controller actions}
\comment{Nick}{Detect method calls from links and buttons inside forms to determine which controller layer apis are called}

\subsubsection{Retrieving information by pattern-matching}

Another simple example of {\jbossel} analysis consists in identifying all method calls whose names do not start with \emph{get}, as they are suspected of mutating the state of the application during the rendering of the page. In this example, we will use a {\java} collection to gather each suspect call and a strategy to collect this information from the AST:
\begin{tomcode}
%strategy CollectSuspectCall(bag:Set) extends Identity() {
  visit Expr {
    f@FunctionCall[name=!concString('get',_*)] -> { bag.add(`f); }
  }
}

public Set collectSuspectCalls(String expression) {
  Node ast = ELParser.parse(expression);
  Set bag = new HashSet();
  `TopDown(CollectSuspectCall(bag)).visit(ast);
  return bag;
}
\end{tomcode}

\section{Related Work}

Compared to other term-rewriting-based languages such as {\asfsdf}, {\maude}, {\elan} and {\stratego}, an important advantage of {\tom} is its seamless integration into any {\java} project. Other languages provide pattern-matching extensions for {\java}: {\scala}, {\pizza}, {\jmatch}. To our knowledge, they only provide basic pattern matching. More specifically, they lack list-matching as well as negative conditions. Other rule-based languages like {\jrules}, {\jbossrules} or {\jess} have \emph{business rules} as their application domain, and not program transformation.

The mapping concept proposed in~{\tom} is based on Philip Wadler's \emph{views}~\cite{wadler87}. This concept gave rise to the {\pizza} language~\cite{odersky97pizza}, which is an extension of the {\java} language to algebraic structures. In addition to algebraic data types, this language provided generics, closures and matching. The matching construction and the algebraic data structures are less expressive than those proposed by {\tom} (for example, there is no matching modulo a theory and the data structures do not have maximal sharing or invariants). Generalist languages like {\fsharp}~\cite{fsharp} and \scala~\cite{scala} offer primitives for the definition of \emph{views}. These constructions are called \emph{active patterns} in {\fsharp}~\cite{syme07} and \emph{extractors} in {\scala}~\cite{emir07}, respectively. Defining views allows deconstructing the same object in different ways by changing the behaviour of the matching statement. This type of view only allows specifying the destructive part of the mapping. Compared to the \emph{extractors} of {\scala}, {\tom} mappings additionally allow building data structures through the same abstractions used for matching.

\section{Conclusion}

\bibliographystyle{splncs}
\bibliography{paper}
\end{document}
{ "alphanum_fraction": 0.7440731753, "avg_line_length": 41.2076923077, "ext": "tex", "hexsha": "0ed137dbb608ef03dc7d3fec8191352abbc6b6d7", "lang": "TeX", "max_forks_count": 6, "max_forks_repo_forks_event_max_datetime": "2022-03-12T14:46:21.000Z", "max_forks_repo_forks_event_min_datetime": "2017-11-30T17:07:10.000Z", "max_forks_repo_head_hexsha": "2918e95c78006f08a2a0919ef440413fa5c2342a", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "rewriting/tom", "max_forks_repo_path": "applications/poc/jboss-el/paper/paper.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "2918e95c78006f08a2a0919ef440413fa5c2342a", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "rewriting/tom", "max_issues_repo_path": "applications/poc/jboss-el/paper/paper.tex", "max_line_length": 123, "max_stars_count": 36, "max_stars_repo_head_hexsha": "2918e95c78006f08a2a0919ef440413fa5c2342a", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "rewriting/tom", "max_stars_repo_path": "applications/poc/jboss-el/paper/paper.tex", "max_stars_repo_stars_event_max_datetime": "2022-02-03T13:13:21.000Z", "max_stars_repo_stars_event_min_datetime": "2016-02-19T12:09:49.000Z", "num_tokens": 7163, "size": 26785 }
\documentclass{beamer} \usepackage[utf8]{inputenc} \defbeamertemplate{description item}{align left}{\insertdescriptionitem\hfill} \usepackage{hyperref} \hypersetup{ colorlinks=true, linkcolor=blue, filecolor=magenta, urlcolor=cyan, } %Information to be included in the title page: \title{311 Social Distancing NYC} \subtitle{Exam project for Management of Scientific Data} \author{Anna Sterzik} \institute{Friedrich Schiller Universität Jena} \date{\today} \begin{document} \frame{\titlepage} \begin{frame} \frametitle{Table of Contents} \tableofcontents \end{frame} \section{Data Management Plan} \begin{frame} \frametitle{Project Information} \setbeamertemplate{description item}[default] \begin{description}[style=multiline] \item[Project name:] 311 Social Distancing NYC \vfill \item[Creator:] Anna Sterzik \vfill \item[Affiliation:] Friedrich Schiller University Jena \vfill \item[Template:] DCC Template %Project abstract: %The 311 Service Requests in New York City will be used to investigate how the acceptance of Social Distancing %measures evolved during the Covid-19 pandemic in New York City. Therefore violations of Social Distancing rules %will be analysed over time. Last modified: 10-08-2020 \item The tool \href{https://dmponline.dcc.ac.uk/}{DMPonline} was used \end{description} \end{frame} \begin{frame} \frametitle{Preexisting Data} \begin{itemize} \item Pre-existing data from \href{https://data.cityofnewyork.us/Social-Services/311-Service-Requests-from-2010-to-Present/erm2-nwe9}{311 Service Requests from 2010 to Present}. \vfill \item Initial Data Filtering: \setbeamertemplate{description item}[align left] \begin{description}[style = multiline] \item [Description:] Including "Social Distancing" \end{description} \vfill \item raw data volume: 32.8 MB \vfill \item Data Format: CSV \vfill \item Open Data \url{https://opendata.cityofnewyork.us/faq/} \vfill \item Accessed/Downloaded 2020-08-11 \end{itemize} \end{frame} \begin{frame} \frametitle{Generated Data} \begin{itemize} \item Data Quality will be monitored using \href{https://openrefine.org/}{OpenRefine}. For every version of refined data the OpenRefine project will be saved together with a version number. \vfill \item Data will be analyzed and visualized using jupyter notebooks. \vfill \item Formats: TXT, JSON, PDF, PNG, TEX, IPYNB \vfill \item Everything apart from raw data will be put under version control by using git. \end{itemize} \end{frame} \begin{frame} \frametitle{Documentation and Metadata} \begin{itemize} \item Software versions used for this project: \setbeamertemplate{description item}[align left] \begin{description} \item[OpenRefine:] 3.3 \item[Python: ] 3.7.4 \item[Pandas: ] 0.25.1 \item[Jupyter: ] 1.0.0 \item[Matplotlib: ] 3.1.1 \item[Numpy: ] 1.17.4 \end{description} \vfill \item Documentation will be provided as a README \vfill \item Provenance for Data Cleansing by usage of OpenRefine \vfill \item Provenance for Jupyter Notebooks will be handled by \href{https://github.com/Sheeba-Samuel/ProvBook}{ProvBook} \end{itemize} \end{frame} \begin{frame} \frametitle{Storage and Backup} \begin{itemize} \item Project will be hosted on github and additional backup will be with URZ and on a USB stick \vfill \item Data will be available for everyone at all times via github. \end{itemize} \end{frame} \begin{frame} \frametitle{Selection, Preservation and Sharing} \begin{itemize} \item The created software for analysis as well as the steps during data cleaning are essential part. The third party data is already preserved. 
\vfill
\item The project will be hosted on GitHub and will be available under an MIT licence.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Resources}
\begin{itemize}
\item The only resource required is storage capacity from the URZ.
\end{itemize}
\end{frame}

\section{Description of the Dataset}
\begin{frame}
\frametitle{Description of the Dataset}
311 Service Requests in New York City from 2010 to present
\vfill
\begin{itemize}
\item Non-emergency social service requests
\vfill
\item Provider: DoITT (Department of Information Technology \& Telecommunications)
\vfill
\item Owner: NYC OpenData
\vfill
\item There are 41 columns in the dataset; they include, but are not limited to, a unique key, information about time, agency, complaint type and location
\item Each row is a service request
\end{itemize}
\end{frame}

\section{Quality Control}
\begin{frame}
\frametitle{Quality Control}
\begin{itemize}
\item Quality control will be done using \href{https://openrefine.org/}{OpenRefine}
\vfill
\item The database states:\\
``NOTE: This data does not present a full picture of 311 calls or service requests, in part because of operational and system complexities associated with remote call taking necessitated by the unprecedented volume 311 is handling during the Covid-19 crisis. The City is working to address this issue.''
\vfill
\item One can also see at first glance that there are several missing values
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Facets}
\begin{columns}
\column{0.5\textwidth}
Facets can be used to get a better overview of the data in specific columns. The Complaint Types and Agency Names seem to be reasonable.
\column{0.5\textwidth}
\includegraphics[width = 0.8\textwidth]{pictures/facets.png}
\end{columns}
\end{frame}

\begin{frame}
\frametitle{Clustering}
Clustering is another option to identify erroneous data, especially spelling mistakes.
\vfill
\includegraphics[width = \textwidth]{pictures/clustering.png}
\end{frame}

\begin{frame}
\frametitle{Sorting}
Using OpenRefine one can also sort the values by certain columns. That way one can, e.g., determine whether the given longitudes and latitudes are reasonable. Here the latitudes and longitudes seem to be valid for NYC.
\vfill
The same can be done for the dates. The creation dates, for example, start at 2020-03-28 and end at 2020-08-10. This seems right as well, because PAUSE started on 2020-03-22.
\vfill
\includegraphics[width=0.5\textwidth]{pictures/sorting.png}
\end{frame}

\begin{frame}
\frametitle{Saving}
OpenRefine projects can be exported. The resulting export only contains TXT and JSON files. These files describe all changes made to the data.
\end{frame}

\section{Data analysis}
\begin{frame}
\frametitle{Data Analysis}
Data analysis will be done using the pandas library in a Jupyter notebook environment.
\end{frame}

\begin{frame}
\frametitle{Number of Service Calls about 'Social Distancing' per calendar week}
\includegraphics[width=\textwidth]{pictures/bar.png}
\end{frame}

\begin{frame}
\frametitle{Comparison of 'Social Distancing' Service Calls in the Bronx and Manhattan}
\includegraphics[width=\textwidth]{pictures/comp_bronx_manhattan.png}
\end{frame}

\section{Preservation and Publishing}
\begin{frame}
\frametitle{Preservation and Publishing}
\begin{itemize}
\item Publishing on GitHub: \url{github.com/azuki-monster/311-Service-Calls-NYC}
\vfill
\item Backup copies with the URZ and on a USB drive as well
\vfill
\item Material available on GitHub under an MIT licence
\end{itemize}
\end{frame}
\end{document}
\documentclass{book} \usepackage[pdftex]{graphicx} \usepackage[pdftex]{hyperref} \usepackage{fancyhdr} %---------- fonts Type 1 ----------------- %\usepackage{times} %\usepackage[T1]{fontenc} %\usepackage{textcomp} %------------------------Page set-up----------------------------------------- \renewcommand{\baselinestretch}{1.25} \setlength{\hoffset}{-1in} \setlength{\oddsidemargin}{3.5cm} \setlength{\evensidemargin}{3.5cm} \setlength{\topmargin}{0cm} \setlength{\footskip}{2cm} \setlength{\headheight}{14pt} \setlength{\marginparwidth}{0cm} \setlength{\marginparsep}{0cm} \setlength{\marginparpush}{0cm} \setlength{\textwidth}{15cm} \setlength{\parindent}{0cm} \setlength{\parskip}{0.75\baselineskip} %------------------------------------------------------------------------------ %---- change link style ---- \hypersetup{colorlinks, linkcolor=blue, pdfstartview={FitH}} % Pages and Fancyheadings stuff %----------------------------------------------------------------------- \cfoot{\thepage} \fancyhead[LE,RO]{} \fancyhead[LO]{\nouppercase{\scshape\rightmark}} \fancyhead[RE]{\nouppercase{\scshape\leftmark}} %----------------------------------------------------------------------- % boxes \newsavebox{\fmboxb} \newenvironment{mybox} {\vspace{-2mm}\begin{center}\begin{lrbox}{\fmboxb}\hspace{2mm} \begin{minipage}{0.85\textwidth} \vspace{2mm}\small} { \vspace{2mm} \end{minipage} \hspace{2mm}\end{lrbox}\fbox{\usebox{\fmboxb}}\end{center}} %----------------- TITLE -------------- \title{\Huge \bfseries PRADO v3.2 Quickstart Tutorial \thanks{Copyright 2004-2009. All Rights Reserved.} } \author{Qiang Xue and Wei Zhuo} \date{\today} %-------------- BEGIN DOCUMENT ------------------ \begin{document} \maketitle \pagestyle{plain} \addcontentsline{toc}{chapter}{Contents} \pagenumbering{roman} \tableofcontents \chapter*{Preface} \addcontentsline{toc}{chapter}{Preface} Prado quick start doc \chapter*{License} \addcontentsline{toc}{chapter}{License} PRADO is free software released under the terms of the following BSD license.\\ Copyright 2004-2013, The PRADO Group (http://www.pradosoft.com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: \begin{enumerate} \item Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. \item Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. \item Neither the name of the PRADO Group nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. \end{enumerate} \begin{verbatim} THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \end{verbatim} \newpage \pagestyle{fancyplain} \pagenumbering{arabic} \include{ch1} \include{ch2} \include{ch3} \include{ch4} \include{ch5} \include{ch6} \include{ch7} \include{ch8} \include{ch9} \include{ch10} \include{ch11} \include{ch12} \include{ch13} \include{ch14} \include{ch15} \include{ch16} \include{ch17} \include{ch18} \include{ch19} \include{ch20} \end{document}
\section{201703-3} \input{problem/10/201703-3-p.tex}
\documentclass[letterpaper,10pt, draftclsnofoot,onecolumn]{IEEEtran}
\usepackage{graphicx} \usepackage{amssymb} \usepackage{amsmath} \usepackage{amsthm} \usepackage{alltt} \usepackage{float} \usepackage{color} \usepackage{url} \usepackage{balance} \usepackage{enumitem} \usepackage{geometry}
\geometry{textheight=8.5in, textwidth=6in}
\newcommand{\cred}[1]{{\color{red}#1}}
\newcommand{\cblue}[1]{{\color{blue}#1}}
\usepackage{hyperref} \usepackage{geometry} \usepackage{titling} \usepackage{bookmark}
\title{Capstone Fall 2017 Problem Statement}
\def\authors{Logan Wingard}
\date{October 9, 2017}
\author{\authors}
\hypersetup{ colorlinks = true, urlcolor = blue, pdfauthor = {\authors}, pdfkeywords = {Capstone Fall 2017 Problem Statement}, pdftitle = {Capstone Fall 2017 Problem Statement}, pdfsubject = {Problem Statement}, pdfpagemode = UseNone }
\begin{document}
\begin{titlepage} \maketitle \centering
\begin{abstract} This document discusses the problem statement of the Aerolyzer project. It covers the main goal of Aerolyzer, the smaller problems that we will need to solve to reach this goal, the proposed solutions to these problems, and the performance metrics. \end{abstract}
\end{titlepage}
\hrulefill
\section{Problem Statement}
The two main goals of the Aerolyzer project are as follows:
\begin{enumerate}
\item Deliver an image classifier using OpenCV, TensorFlow by Google, and preexisting machine learning tools to detect fog, dust, and vibrant sunsets/sunrises, with a priority on air quality.
\item Relay the information collected about air quality and weather back to end users.
\end{enumerate}
Some of the problems we need to tackle first include the following:
\begin{enumerate}
\item Use OpenCV to detect that a skyline/landscape is in the image.
\item Use a photograph's data to determine the location and time the photograph was taken.
\item Use the data to better analyze air quality and also to determine if an image is of a sunset or sunrise.
\end{enumerate}
\hrulefill
\section{Proposed Solutions}
Many of the tools we will need to solve these problems are already available through resources such as weatherground.com, OpenCV, and TensorFlow by Google; we just need to find a way to put them to use in a user-friendly UI. We will be using Python 2.7 and Django to accomplish this goal. Communication throughout the project will aid in working through problems.
\hrulefill
\section{Performance Metrics}
The finished product will be a completed app that receives data from a photograph and relays air quality and weather information back to the user. This, however, will be the final step. We will have several goals to reach along the way, such as successfully detecting skylines/landscapes. Then, we will reach the goal of extracting information from a photo, such as location and time.
\end{document}
\section{Discussion}
The goal of this chapter is to provide an overview of both the theoretical and practical results and to give detailed comparisons and a trade-off analysis of the algorithms.

\subsection{Discussion of Theoretical and Practical Results}
Returning to the theoretical results from Table \ref{tab:algorithm-comparison}, we have already seen that none of the algorithms produce matchings with all of the desired criteria. In particular, strategy-proofness cannot be achieved when trying to maximize a matching's cardinality. Popular-CHA, which theoretically is strategy-proof, unfortunately does not always find a matching, which makes it less suitable for real-world applications. Both the results from Diebold et al. (Section \ref{sec:practical-results-lit}) and our experiment show that Popular-CHA fails for about 90\% of the tested instances. There is also no known algorithm that efficiently computes the popular matching if it is not of maximum cardinality, which makes it questionable whether popularity should be considered at all when designing such a matching system. The results from Tables \ref{tab:results-preflib2} and \ref{tab:results-uniform-large-complete} show that the matchings produced by Popular-CHA and the Hungarian algorithm often tie in terms of popularity, even though the Hungarian algorithm clearly performs better on other metrics. Looking at the rank distributions of some matchings in Figures \ref{fig:preflib2-rank-distribution} and \ref{fig:zipfian-distribution}, we have seen that (Mod-) Popular-CHA attempts to maximize the number of students assigned to their first choice in order to find a popular matching. It is highly questionable whether this property is desirable over having a matching with a lower rank average, and especially a lower rank standard deviation, which is what the Hungarian algorithm typically finds. For this reason, we would argue that popularity as an optimality criterion should not be of high importance when selecting a matching mechanism for the CHA problem. The results have shown that popular matchings, compared to profile-based optimal matchings, do not explicitly attempt to provide a good match for every student, but instead give some students a bad match or no match at all in order to give the majority of students a better match.

Another surprising finding from our experiments is that Max-PaCHA usually performs worse than all other algorithms on the rank metrics, including RSD. This can easily be explained by the fact that Max-PaCHA essentially performs a local search from an initial (maximum-cardinality) matching and terminates upon finding a single local optimum. In contrast, the greedy approach (RSD) also performs a local search; however, it achieves better average ranks because it does not attempt to maximize the matching's cardinality. Comparing RSD and Max-PaCHA to the other algorithms, the Hungarian algorithm and Mod-Popular-CHA should be preferred because they simply perform better on almost all other metrics in the experiments. However, if using a strategy-proof mechanism is a requirement, RSD should obviously be used.

\subsection{Potential Algorithmic Improvements}\label{sec:improvements}
In Section \ref{impl:mod-max-pop}, we briefly discussed a modified version of the Popular-CHA algorithm that was used in the experiment to gain insights into failing instances of Popular-CHA.
However, the modified algorithm leaves some room for optimization, as it does not attempt to match students that were either unmatched or assigned to their last-resort preference after performing the maximum cardinality matching. A simple way of improving this algorithm is to use one of the other algorithms (e.g., RSD) on that unmatched subset of students; a short sketch of this idea is given at the end of this chapter. Doing this would not worsen the matching's popularity, because, from a popularity perspective, being matched to any seminar is better than not being matched at all. Using this modified mechanism could be a good alternative to the Hungarian algorithm for large instances where runtime could be a problem.

Besides that, different algorithmic approaches could be used for computing a profile-based optimal matching. In Section \ref{algo:assignment}, we presented the approaches of reducing the matching problem to the assignment problem and of using flow networks to find a rank-maximal matching. For the use case of student and seminar matchings, it can be assumed that instances will be relatively small; however, the experiments have also shown that the Hungarian algorithm's runtime is quite poor compared to the other algorithms on larger instances. For instance, with about 5000 students (Tables \ref{tab:results-uniform-large} and \ref{tab:results-uniform-large-complete}), the Hungarian algorithm took, on average, about 60 seconds to find a matching, compared to about 230ms for the Popular-CHA mechanism. A faster algorithm for finding a profile-based optimal matching, Rank-Max, is presented by Sng et al. \cite{SngThesis}. They show that for a given instance $I$ of the CHAT (with ties) problem, the algorithm finds a rank-maximal matching in $\mathcal{O}(\min(z^*\sqrt{C}, C + z^*)m)$, where $z^*$ is the maximal rank of an edge in an optimal solution of $I$, $C$ is the total capacity of the houses in $I$, and $m$ is the sum of the lengths of all preference lists in $I$. Compared to that, the version of the Hungarian algorithm used for the experiments has a runtime of $\mathcal{O}((C+n_1)^3)$, with $n_1$ being the number of students and $C$ again being the total capacity of all seminars. While the theoretical runtime of Rank-Max is much better than that of the Hungarian algorithm, we have seen that for small, real-world instances like the PrefLib datasets (Table \ref{tab:results-preflib1}), the runtime of the Hungarian algorithm is still below 20ms.

\subsection{System Design Recommendations}
As alluded to in the previous two subsections, the Hungarian algorithm provides the best results and, from a distribution perspective, probably the fairest distribution of all the algorithms. Important properties of the algorithm are that it finds matchings of maximal cardinality, as well as matchings with the lowest average rank combined with a low rank standard deviation. While the runtime of this algorithm was the highest among the algorithms presented, performance should not be a problem for real-world student-seminar matching scenarios, where the instances are somewhat small. To improve performance, the Rank-Max algorithm \cite{SngThesis} could be used for finding an exact result or, for very large instances, the Mod-Popular-CHA algorithm can be used. To improve the performance of all algorithms, it would also help to require students to supply at least $k$ preferences, where $k$ could be a fixed fraction of the total seminar count.
The experiments have indicated that longer preference lists, especially when power-law-like preference distributions are used, make the algorithms perform better. Besides that, the online variant of the problem can be solved using a first-come, first-served mechanism, like RSD, or an algorithm like Ranking (Algorithm \ref{alg:ranking}) that maximizes cardinality in the online scenario (see Appendix \ref{sec:online-variants} for more information on the online problem).
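To make the improvement sketched in Section \ref{sec:improvements} a bit more concrete, the following Python fragment shows how the students left unmatched by Mod-Popular-CHA could be assigned greedily with RSD. It is only an illustrative sketch: the data structures (preference lists and remaining seminar capacities held in dictionaries) and the function name are hypothetical and do not correspond to the implementation used in the experiments.

\begin{verbatim}
import random

def rsd_on_unmatched(unmatched_students, preferences, remaining_capacity):
    """Random serial dictatorship over the students left unmatched.

    preferences: dict mapping a student to an ordered list of seminars.
    remaining_capacity: dict mapping a seminar to its number of free seats.
    """
    assignment = {}
    order = list(unmatched_students)
    random.shuffle(order)                      # draw a random priority order
    for student in order:
        for seminar in preferences[student]:   # most-preferred seminar first
            if remaining_capacity.get(seminar, 0) > 0:
                assignment[student] = seminar
                remaining_capacity[seminar] -= 1
                break
    return assignment
\end{verbatim}

Because every student assigned in this step was previously unmatched, adding these assignments can only improve, never worsen, the popularity of the overall matching.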
%\documentclass[pageno]{jpaper}
\documentclass[12pt]{report}
\usepackage[normalem]{ulem}
\usepackage{graphicx}
\usepackage{caption}
%\usepackage{subcaption}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{color}
\usepackage{listings}
\usepackage{booktabs}
\usepackage{geometry}
\usepackage{fixltx2e}
\usepackage{hyperref}
\graphicspath{ {figures/} }
\geometry{margin=0.75in}
%\lstset{basicstyle=\footnotesize,frame=single}
\definecolor{dkgreen}{rgb}{0,0.6,0}
\definecolor{gray}{rgb}{0.5,0.5,0.5}
\definecolor{mauve}{rgb}{0.58,0,0.82}
\lstset{frame=none, language=bash, aboveskip=3mm, belowskip=3mm, showstringspaces=false, columns=flexible, basicstyle={\small\ttfamily}, numbers=none, numberstyle=\tiny\color{gray}, keywordstyle=\color{blue}, commentstyle=\color{dkgreen}, stringstyle=\color{mauve}, breaklines=true, breakatwhitespace=true, tabsize=3 }
\newenvironment{blockquote}{%
\par%
\medskip
\leftskip=2em\rightskip=2em%
\ignorespaces}{%
\par\medskip}
\begin{document}
\title{ {ScaffCC: Scaffold Compiler Collection}\\ {\large User Manual}\\ }
\author{Ali JavadiAbhari, Adam Holmes, Shruti Patil, Jeff Heckey, Daniel Kudrow,\\ Pranav Gokhale, David Noursi, Lee Ehudin, \\ Yongshan Ding, Xin-Chuan Ryan Wu, Yunong Shi}
\date{June 2018}
\maketitle
\chapter*{Abstract}
~\\ ScaffCC~\cite{scaffcc} is a compiler and scheduler for the Scaffold programming language. It is built on the LLVM~\cite{LLVM} open-source infrastructure and is intended for writing and analyzing code for quantum computing applications.\\
ScaffCC enables researchers to compile quantum applications written in Scaffold to a low-level quantum assembly format (QASM), apply error correction, and generate time and area metrics. It is written to be scalable up to problem sizes at which quantum algorithms outperform classical ones, and as such provides valuable insight into the overheads involved and possible optimizations for a realistic implementation on a future device technology.\\
If you use ScaffCC in your publications, please cite this work as follows:\\
Ali JavadiAbhari, Shruti Patil, Daniel Kudrow, Jeff Heckey, Alexey Lvov, Frederic Chong and Margaret Martonosi, {\em``ScaffCC: A Framework for Compilation and Analysis of Quantum Computing Programs,''} ACM International Conference on Computing Frontiers (CF 2014), Cagliari, Italy, May 2014\\
\tableofcontents
\include{ch-release/chapter-release}
\include{ch-inst/chapter-inst}
\include{ch-usage/chapter-usage}
\include{ch-apps/chapter-apps}
\include{ch-rkqc/chapter-rkqc}
\include{ch-expand/chapter-expand}
\bibliography{references}
\bibliographystyle{IEEEtranS}
\end{document}
\section{Experimental results}\label{sec:results}
\subsection{Testbed}
All of our performance results use a hardware testbed that consists of two Intel Xeon E5 2440 v2 1.9GHz servers, each with 1 CPU socket and 8 physical cores with hyperthreading enabled. Each server has an Intel 10GbE X540-AT2 dual port NIC, with the two ports of the Intel NIC on one server connected to the two ports on the identical NIC on the other server. We installed p4c-xdp on one server, the {\em target server}, and attached the XDP program to the port that receives the packets. The other server, the {\em source server}, generates packets at the maximum 10~Gbps packet rate of 14.88~Mpps using the DPDK-based TRex~\cite{trex} traffic generator. The source server sends minimum-length 64-byte packets in a {\em single} UDP flow to one port of the target server, and receives the forwarded packets on the same port. At the target server, we use only one core to process all packets. Every packet received goes through the pipeline specified in P4. We use the sample P4 programs in the tests directory and the following metrics to understand the performance impact of the P4-generated XDP program:
\begin{itemize}
\item Packet processing rate (Mpps): Once the XDP program finishes processing the packet, it returns one of the actions mentioned in Section~\ref{sec:background}. When we want to count the number of packets that can be dropped per second, we modify each P4 program to always return XDP\_DROP.
\item CPU utilization: Every packet processed by the XDP program is run under the per-core software IRQ daemon, named \texttt{ksoftirqd/\textit{core}}. All packets are processed by only one core with one kthread, the ksoftirqd, and we measure the CPU utilization of the ksoftirqd on that core.
\item Number of BPF instructions verified: For each program, we list the complexity as the number of BPF instructions the eBPF verifier scans.
\end{itemize}
The target server is running Linux kernel 4.19-rc5 and, for all our tests, the BPF JIT (Just-In-Time) compiler is enabled and JIT hardening is disabled. All programs are compiled with clang 3.8 and llvm 5.0. For each test program, we use the following command from iproute2 to load it into the kernel:
\begin{lstlisting}[frame=none]
ip link set dev eth0 xdp obj xdp1.o verb
\end{lstlisting}
The Intel 10GbE X540 NIC is running the \texttt{ixgbe} driver with 16 RX queues set up. Since the source server is sending a single UDP flow, packets always arrive at a single queue ID. As a result, we collect the number of packets being dropped at this queue.
\subsection{Results}
To compute the baseline performance, we wrote two small XDP programs by hand: \texttt{SimpleDrop} drops all packets by returning \texttt{XDP\_DROP} immediately, and \texttt{SimpleTX} forwards the packet back out the receiving port by returning \texttt{XDP\_TX}. Each of these programs consists of only two BPF instructions.
\begin{lstlisting}[frame=none]
/* SimpleDrop */
0: (b7) r0 = 1 // XDP_DROP
1: (95) exit

/* SimpleTX */
0: (b7) r0 = 3 // XDP_TX
1: (95) exit
\end{lstlisting}
Afterwards, we attached the following P4 programs to the receiving device:
\begin{itemize}
\item xdp1.p4: Parse Ethernet/IPv4 header, deparse it, and drop.
\item xdp3.p4: Parse Ethernet/IPv4 header, look up a MAC address in a map, deparse it, and drop.
\item xdp6.p4: Parse Ethernet/IPv4 header, look up a new TTL value in an eBPF map, set it in the IPv4 header, deparse it, and drop.
\item xdp7.p4: Parse Ethernet/IPv4/UDP header, write a pre-defined source port and source IP, recalculate the checksum, deparse, and drop.
\item xdp11.p4: Parse Ethernet/IPv4 header, swap the src/dst MAC addresses, deparse it, and send back to the same port (XDP\_TX).
\item xdp15.p4: Parse Ethernet header, insert a customized 8-byte header, deparse it, and send back to the same port (XDP\_TX).
\end{itemize}
\begin{table} \centering \small
\begin{tabular}{llll}
\underline{P4 program} & \underline{CPU Util.} & \underline{Mpps} & \underline{Insns./Stack}\\
SimpleDrop & 75\% & 14.4 & 2/0 \\
SimpleTX & 100\% & 7.2 & 2/0 \\
xdp1.p4 & 100\% & 8.1 & 277/256 \\
xdp3.p4 & 100\% & 7.1 & 326/256 \\
xdp6.p4 & 100\% & 2.5 & 335/272 \\
xdp7.p4 & 100\% & 5.7 & 5821/336 \\
xdp11.p4 & 100\% & 4.7 & 335/216 \\
xdp15.p4 & 100\% & 5.5 & 96/56\\
\end{tabular}
\caption{\footnotesize Performance of the XDP programs generated by the p4c-xdp compiler using a single core.}
\label{tab:perf}
\end{table}
As shown in Table~\ref{tab:perf}, xdp1.p4 allows us to measure the overhead introduced by parsing and deparsing: a drop from 14.4~Mpps to 8.1~Mpps. xdp3.p4 reduces the rate by another million PPS due to the eBPF map lookup (this operation always returns NULL; no value from the map is accessed). xdp6.p4 has significant overhead because it accesses a map, finds a new TTL value, and writes to the IPv4 header. Interestingly, although xdp7.p4 does extra parsing of the UDP header and recalculates the checksum, it has only a moderate overhead because of the lack of map accesses. Finally, xdp11.p4 and xdp15.p4 show the transmit (XDP\_TX) performance. Compared with xdp11.p4, xdp15.p4 invokes the \texttt{bpf\_adjust\_head} helper function to reset the pointer for the extra bytes. It does not incur much overhead because there is already reserved space in front of every XDP packet frame.
\subsection{Performance Analysis}
To further understand the performance overhead of programs generated by p4c-xdp, we broke down the CPU utilization. We used the Linux perf tool on the process ID of the ksoftirqd thread that shows 100\% utilization:
\begin{lstlisting}[frame=none]
perf record -p <pid of ksoftirqd> sleep 10
\end{lstlisting}
\noindent The following output shows the profile of xdp1.p4:
{\scriptsize
\begin{verbatim}
83.19% [kernel.kallsyms] [k] ___bpf_prog_run
8.14% [ixgbe] [k] ixgbe_clean_rx_irq
4.82% [kernel.kallsyms] [k] nmi
1.48% [kernel.kallsyms] [k] bpf_xdp_adjust_head
1.07% [kernel.kallsyms] [k] __rcu_read_unlock
0.40% [ixgbe] [k] ixgbe_alloc_rx_buffers
\end{verbatim}
}
This confirms that most of the CPU cycles are spent executing the XDP program, \texttt{\_\_\_bpf\_prog\_run}, which prompted us to investigate the eBPF C code of xdp1.p4.
\begin{table} \centering \small
\begin{tabular}{llll}
\underline{P4 program} & \underline{CPU Util.} & \underline{Mpps} & \underline{Insns./Stack}\\
xdp1.p4 & 77\% & 14.8 & 26/0 \\
xdp3.p4 & 100\% & 13 & 100/16 \\
xdp6.p4 & 100\% & 12 & 98/40 \\
\end{tabular}
\caption{\footnotesize Performance of the XDP programs without the deparser.}
\label{tab:perf2}
\end{table}
After commenting out the deparser C code, performance increases significantly (see Table~\ref{tab:perf2}). In the generated code, the p4c-xdp compiler always writes back the entire packet content, even when the P4 program does not modify any fields. In addition, the parser/deparser incur byte-order translations (e.g., \texttt{htonl}, \texttt{ntohl}). This could be avoided by always using network byte order in P4 and XDP. We plan to implement optimizations to reduce this overhead.
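As an aside on the testbed numbers used throughout this section: the 14.88~Mpps rate quoted for the TRex generator is simply the theoretical 10~Gbps line rate for minimum-size Ethernet frames, since each 64-byte frame occupies 84 bytes on the wire once the 8-byte preamble and the 12-byte inter-frame gap are included:
\[
\frac{10\times 10^{9}~\mathrm{bit/s}}{(64+8+12)~\mathrm{bytes}\times 8~\mathrm{bit/byte}} \approx 14.88\times 10^{6}~\mathrm{packets/s}.
\]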
\subsection{Variance of the IV estimator}

In OLS we had:

\(\hat{\theta}_{OLS} = (X^TX)^{-1}X^Ty\)

\(Var[\hat{\theta}_{OLS}]=(X^TX)^{-1}X^T\Omega X(X^TX)^{-1}\)

With IV we have:

\(\hat{\theta}_{IV} = (Z^TX)^{-1}Z^Ty\)

\(Var[\hat{\theta}_{IV}]=(Z^TX)^{-1}Z^T\Omega Z(X^TZ)^{-1}\)

We can use weighted least squares to deal with \(\Omega\).
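As a quick check of where the sandwich form comes from (a sketch, assuming \(y=X\theta+\varepsilon\) with \(Var[\varepsilon]=\Omega\) and instruments \(Z\) uncorrelated with \(\varepsilon\)):

\(\hat{\theta}_{IV}-\theta=(Z^TX)^{-1}Z^T\varepsilon\)

\(Var[\hat{\theta}_{IV}]=(Z^TX)^{-1}Z^T\Omega Z\left((Z^TX)^{-1}\right)^T=(Z^TX)^{-1}Z^T\Omega Z(X^TZ)^{-1}\)

Setting \(Z=X\) recovers the OLS expression above.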
\documentclass[aspectratio=169]{beamer}
% because we need to claim weird things
\newtheorem{claim}{Claim}
\newtheorem{defn}{Definition}
%\newtheorem{lemma}{Lemma}
\newtheorem{thm}{Theorem}
\newtheorem{vita}{Vit\ae}
\newtheorem{qotd}{Quote of the Day}
\usepackage{algorithm}
\usepackage{algpseudocode}
\usepackage{listings}
\usepackage{color}
\usepackage{graphics}
\usepackage{ulem}
\bibliographystyle{unsrt}
% background image
\usebackgroundtemplate%
{%
\includegraphics[width=\paperwidth,height=\paperheight]{../artifacts/stemulus.pdf}%
}
\setbeamertemplate{caption}[numbered]
\lstset{%
breaklines=true, captionpos=b, frame=single, keepspaces=true, showstringspaces=false }
% page numbers
\addtobeamertemplate{navigation symbols}{}{%
\usebeamerfont{footline}%
\usebeamercolor[fg]{footline}%
\hspace{1em}%
\insertframenumber/\inserttotalframenumber }
% presentation header
\usetheme{Warsaw}
\title{Big Data in a Big World}
\author{Dylan Lane McDonald}
\institute{CNM STEMulus Center\\Web Development with PHP}
\date{\today}
\begin{document}
\lstset{language=Java}
\begin{frame} \titlepage \end{frame}
\begin{frame} \frametitle{Outline} \tableofcontents \end{frame}
\section{Big Data in a Big World}
\begin{frame} \frametitle{Big Data in a Big World} Data from other sites can be integrated into your site using external APIs. \mbox{}\\
\begin{defn} An \textbf{Application Programming Interface (API)} is a publicly available set of functions or methods from an external source. \end{defn} \mbox{}\\
All the PHP and JavaScript methods \& functions we've used so far are included as part of the core PHP \& JavaScript APIs. jQuery is also an API we've used in this class. Another example of an API we've used is \texttt{mysqli}. For the purposes of this discussion, we will be using APIs that allow access to external data. \end{frame}
\begin{frame} \frametitle{Acquiring Data from an API} \begin{figure} \includegraphics[scale=0.2]{../artifacts/json-roadmap.pdf} \caption{Acquiring Data from an API} \label{fig:jsonflow} \end{figure} \end{frame}
\section{Data Formats}
\begin{frame} \frametitle{Data Formats} The following data formats are available. These are listed in order of preference both in class and in industry.
\begin{enumerate}
\item \textbf{JSON}: \textbf{J}ava\textbf{S}cript \textbf{O}bject \textbf{N}otation. A simplified, compact notation for representing objects. All major languages have support for writing JSON strings to send to an API, as well as reading JSON strings and converting them to their own internal objects.
\item \textbf{SOAP}: Originally stood for \textbf{S}imple \textbf{O}bject \textbf{A}ccess \textbf{P}rotocol. SOAP builds on XML and standardizes the format of the messages being sent to \& from sites.
\item \textbf{XML}: E\textbf{x}tensible \textbf{M}arkup \textbf{L}anguage. XML allows one to define one's own markup language and use these tags to house the data being exchanged.
\end{enumerate}
\end{frame}
\end{document}
\section{SofaPython scene files}

It is possible, once the plugin is loaded in runSofa, to load Python script (*.py) files directly instead of XML (*.scn) files. This way, you can create scene graphs in a procedural way instead of a descriptive way. An example of this is provided in the \textcode{examples/PythonScene.py} sample scene of the plugin.
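For orientation, such a procedural scene typically boils down to a Python file that defines a \textcode{createScene} entry point, which runSofa calls with the root node of the scene graph. The following is only an illustrative sketch (the exact node and component names are placeholders chosen for the example); the authoritative reference is the \textcode{examples/PythonScene.py} sample mentioned above.

\begin{verbatim}
# Illustrative sketch of a procedural SofaPython scene.
# The node and component names below are placeholders for the example.
def createScene(rootNode):
    # build the scene graph procedurally instead of describing it in XML
    child = rootNode.createChild('myNode')
    child.createObject('MechanicalObject', position='0 0 0')
    return rootNode
\end{verbatim}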
\iffalse \subsubsection{Controllers} \begin{figure}[H] \centering \includegraphics[width=0.8\textwidth]{./assets/p2/Controllers_1.png} \end{figure} A controller's purpose is to receive specific requests for the application. The routing mechanism controls which controller receives which requests. Frequently, each controller has more than one route, and different routes can perform different actions. \subsubsection{Providers} \begin{figure}[H] \centering \includegraphics[width=0.8\textwidth]{./assets/p2/Providers_1.png} \end{figure} Nest introduces a new concept called providers. Many of the basic Nest classes may be treated as a provider --- services, repositories, factories, helpers, and so on. The main idea of a provider is that it can be injected as dependency; this means objects can create various relationships with each other, and the function of ``wiring up'' instances of objects can largely be delegated to the Nest runtime system. \subsubsection{Modules} \begin{figure}[H] \centering \includegraphics[width=0.8\textwidth]{./assets/p2/Modules_1.png} \end{figure} Each application has at least one module, a root module. The root module is the starting point Nest uses to build the application graph --- the internal data structure Nest uses to resolve module and provider relationships and dependencies. While very small applications may theoretically have just the root module, this is not the typical case. We want to emphasize that modules are strongly recommended as an effective way to organize your components. Thus, for most applications, the resulting architecture will employ multiple modules, each encapsulating a closely related set of capabilities. The module encapsulates providers by default. This means that it's impossible to inject providers that are neither directly part of the current module nor exported from the imported modules. Thus, you may consider the exported providers from a module as the module's public interface, or API\@. \fi
% Options for packages loaded elsewhere \PassOptionsToPackage{unicode}{hyperref} \PassOptionsToPackage{hyphens}{url} \PassOptionsToPackage{dvipsnames,svgnames*,x11names*}{xcolor} % \documentclass[ ]{krantz} \usepackage{lmodern} \usepackage{amssymb,amsmath} \usepackage{ifxetex,ifluatex} \ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex \usepackage[T1]{fontenc} \usepackage[utf8]{inputenc} \usepackage{textcomp} % provide euro and other symbols \else % if luatex or xetex \usepackage{unicode-math} \defaultfontfeatures{Scale=MatchLowercase} \defaultfontfeatures[\rmfamily]{Ligatures=TeX,Scale=1} \fi % Use upquote if available, for straight quotes in verbatim environments \IfFileExists{upquote.sty}{\usepackage{upquote}}{} \IfFileExists{microtype.sty}{% use microtype if available \usepackage[]{microtype} \UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts }{} \makeatletter \@ifundefined{KOMAClassName}{% if non-KOMA class \IfFileExists{parskip.sty}{% \usepackage{parskip} }{% else \setlength{\parindent}{0pt} \setlength{\parskip}{6pt plus 2pt minus 1pt}} }{% if KOMA class \KOMAoptions{parskip=half}} \makeatother \usepackage{xcolor} \IfFileExists{xurl.sty}{\usepackage{xurl}}{} % add URL line breaks if available \IfFileExists{bookmark.sty}{\usepackage{bookmark}}{\usepackage{hyperref}} \hypersetup{ pdftitle={PSYC6060 Course Notes}, pdfauthor={David J. Stanley}, colorlinks=true, linkcolor=Maroon, filecolor=Maroon, citecolor=Blue, urlcolor=Blue, pdfcreator={LaTeX via pandoc}} \urlstyle{same} % disable monospaced font for URLs \usepackage{color} \usepackage{fancyvrb} \newcommand{\VerbBar}{|} \newcommand{\VERB}{\Verb[commandchars=\\\{\}]} \DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}} % Add ',fontsize=\small' for more characters per line \usepackage{framed} \definecolor{shadecolor}{RGB}{248,248,248} \newenvironment{Shaded}{\begin{snugshade}}{\end{snugshade}} \newcommand{\AlertTok}[1]{\textcolor[rgb]{0.33,0.33,0.33}{#1}} \newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.37,0.37,0.37}{\textbf{\textit{#1}}}} \newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.61,0.61,0.61}{#1}} \newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.06,0.06,0.06}{#1}} \newcommand{\BuiltInTok}[1]{#1} \newcommand{\CharTok}[1]{\textcolor[rgb]{0.5,0.5,0.5}{#1}} \newcommand{\CommentTok}[1]{\textcolor[rgb]{0.37,0.37,0.37}{\textit{#1}}} \newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.37,0.37,0.37}{\textbf{\textit{#1}}}} \newcommand{\ConstantTok}[1]{\textcolor[rgb]{0,0,0}{#1}} \newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.27,0.27,0.27}{\textbf{#1}}} \newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.27,0.27,0.27}{#1}} \newcommand{\DecValTok}[1]{\textcolor[rgb]{0.06,0.06,0.06}{#1}} \newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.37,0.37,0.37}{\textbf{\textit{#1}}}} \newcommand{\ErrorTok}[1]{\textcolor[rgb]{0.14,0.14,0.14}{\textbf{#1}}} \newcommand{\ExtensionTok}[1]{#1} \newcommand{\FloatTok}[1]{\textcolor[rgb]{0.06,0.06,0.06}{#1}} \newcommand{\FunctionTok}[1]{\textcolor[rgb]{0,0,0}{#1}} \newcommand{\ImportTok}[1]{#1} \newcommand{\InformationTok}[1]{\textcolor[rgb]{0.37,0.37,0.37}{\textbf{\textit{#1}}}} \newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.27,0.27,0.27}{\textbf{#1}}} \newcommand{\NormalTok}[1]{#1} \newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.43,0.43,0.43}{\textbf{#1}}} \newcommand{\OtherTok}[1]{\textcolor[rgb]{0.37,0.37,0.37}{#1}} \newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.37,0.37,0.37}{\textit{#1}}} \newcommand{\RegionMarkerTok}[1]{#1} 
\newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0,0,0}{#1}} \newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.5,0.5,0.5}{#1}} \newcommand{\StringTok}[1]{\textcolor[rgb]{0.5,0.5,0.5}{#1}} \newcommand{\VariableTok}[1]{\textcolor[rgb]{0,0,0}{#1}} \newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.5,0.5,0.5}{#1}} \newcommand{\WarningTok}[1]{\textcolor[rgb]{0.37,0.37,0.37}{\textbf{\textit{#1}}}} \usepackage{longtable,booktabs} % Correct order of tables after \paragraph or \subparagraph \usepackage{etoolbox} \makeatletter \patchcmd\longtable{\par}{\if@noskipsec\mbox{}\fi\par}{}{} \makeatother % Allow footnotes in longtable head/foot \IfFileExists{footnotehyper.sty}{\usepackage{footnotehyper}}{\usepackage{footnote}} \makesavenoteenv{longtable} \usepackage{graphicx,grffile} \makeatletter \def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi} \def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi} \makeatother % Scale images if necessary, so that they will not overflow the page % margins by default, and it is still possible to overwrite the defaults % using explicit options in \includegraphics[width, height, ...]{} \setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio} % Set default figure placement to htbp \makeatletter \def\fps@figure{htbp} \makeatother \setlength{\emergencystretch}{3em} % prevent overfull lines \providecommand{\tightlist}{% \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} \setcounter{secnumdepth}{5} \usepackage{booktabs} \usepackage{longtable} \usepackage[bf,singlelinecheck=off]{caption} \usepackage{framed,color} \definecolor{shadecolor}{RGB}{248,248,248} \renewcommand{\textfraction}{0.05} \renewcommand{\topfraction}{0.8} \renewcommand{\bottomfraction}{0.8} \renewcommand{\floatpagefraction}{0.75} \renewenvironment{quote}{\begin{VF}}{\end{VF}} \let\oldhref\href \renewcommand{\href}[2]{#2\footnote{\url{#1}}} \makeatletter \newenvironment{kframe}{% \medskip{} \setlength{\fboxsep}{.8em} \def\at@end@of@kframe{}% \ifinner\ifhmode% \def\at@end@of@kframe{\end{minipage}}% \begin{minipage}{\columnwidth}% \fi\fi% \def\FrameCommand##1{\hskip\@totalleftmargin \hskip-\fboxsep \colorbox{shadecolor}{##1}\hskip-\fboxsep % There is no \\@totalrightmargin, so: \hskip-\linewidth \hskip-\@totalleftmargin \hskip\columnwidth}% \MakeFramed {\advance\hsize-\width \@totalleftmargin\z@ \linewidth\hsize \@setminipage}}% {\par\unskip\endMakeFramed% \at@end@of@kframe} \makeatother \newenvironment{rmdblock}[1] { \begin{itemize} \renewcommand{\labelitemi}{ \raisebox{-.7\height}[0pt][0pt]{ {\setkeys{Gin}{width=3em,keepaspectratio}\includegraphics{images/#1}} } } \setlength{\fboxsep}{1em} \begin{kframe} \item } { \end{kframe} \end{itemize} } \newenvironment{rmdnote} {\begin{rmdblock}{note}} {\end{rmdblock}} \newenvironment{rmdcaution} {\begin{rmdblock}{caution}} {\end{rmdblock}} \newenvironment{rmdimportant} {\begin{rmdblock}{important}} {\end{rmdblock}} \newenvironment{rmdtip} {\begin{rmdblock}{tip}} {\end{rmdblock}} \newenvironment{rmdwarning} {\begin{rmdblock}{warning}} {\end{rmdblock}} \renewenvironment{Shaded}{\begin{kframe}}{\end{kframe}} \usepackage{makeidx} \makeindex \urlstyle{tt} \usepackage{amsthm} \makeatletter \def\thm@space@setup{% \thm@preskip=8pt plus 2pt minus 4pt \thm@postskip=\thm@preskip } \makeatother \frontmatter \usepackage[]{natbib} \bibliographystyle{apalike} \title{PSYC6060 Course Notes} \author{David J. 
Stanley}
\date{2020-06-23}
\begin{document}
\maketitle
% you may need to leave a few empty pages before the dedication page
%\cleardoublepage\newpage\thispagestyle{empty}\null
%\cleardoublepage\newpage\thispagestyle{empty}\null
%\cleardoublepage\newpage
\thispagestyle{empty}
\begin{center}
To my students, from whom I've learned so much about teaching.
%\includegraphics{images/dedication.pdf}
\end{center}
\setlength{\abovedisplayskip}{-5pt}
\setlength{\abovedisplayshortskip}{-5pt}
{ \hypersetup{linkcolor=} \setcounter{tocdepth}{2} \tableofcontents }
\listoftables
\listoffigures
\hypertarget{preface}{%
\chapter*{Preface}\label{preface}}
R's emerging role in psychology will be described here.
\hypertarget{structure-of-the-book}{%
\section*{Structure of the book}\label{structure-of-the-book}}
I'll put more about the structure of the book here in the future.
\hypertarget{software-information-and-conventions}{%
\section*{Software information and conventions}\label{software-information-and-conventions}}
I used the \textbf{knitr}\index{knitr} package \citep{xie2015} and the \textbf{bookdown}\index{bookdown} package \citep{R-bookdown} to compile my book. My R session information is shown below:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{xfun}\OperatorTok{::}\KeywordTok{session_info}\NormalTok{()}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## R version 4.0.1 (2020-06-06)
## Platform: x86_64-apple-darwin17.0 (64-bit)
## Running under: macOS Catalina 10.15.5, RStudio 1.3.959
##
## Locale: en_CA.UTF-8 / en_CA.UTF-8 / en_CA.UTF-8 / C / en_CA.UTF-8 / en_CA.UTF-8
##
## Package version:
## askpass_1.1 base64enc_0.1.3 bookdown_0.19.1
## compiler_4.0.1 curl_4.3 digest_0.6.25
## evaluate_0.14 glue_1.4.1 graphics_4.0.1
## grDevices_4.0.1 highr_0.8 htmltools_0.4.0
## jsonlite_1.6.1 knitr_1.28 magrittr_1.5
## markdown_1.1 methods_4.0.1 mime_0.9
## openssl_1.4.1 packrat_0.5.0 Rcpp_1.0.4.6
## rlang_0.4.6 rmarkdown_2.2 rsconnect_0.8.16
## rstudioapi_0.11 stats_4.0.1 stringi_1.4.6
## stringr_1.4.0 sys_3.3 tinytex_0.23
## tools_4.0.1 utils_4.0.1 xfun_0.14
## yaml_2.2.1
\end{verbatim}
Package names are in bold text (e.g., \textbf{rmarkdown}), and inline code and filenames are formatted in a typewriter font (e.g., \texttt{knitr::knit(\textquotesingle{}foo.Rmd\textquotesingle{})}). Function names are followed by parentheses (e.g., \texttt{bookdown::render\_book()}).
\hypertarget{about-the-author}{%
\chapter*{About the Author}\label{about-the-author}}
David J. Stanley is an Associate Professor of Industrial and Organizational Psychology at the University of Guelph in Canada. He obtained his PhD from Western University in London, Ontario. David has published articles in Advances in Methods and Practices in Psychological Science, Organizational Research Methods, Journal of Applied Psychology, Perspectives in Psychological Science, Journal of Business and Psychology, Journal of Vocational Behaviour, Journal of Personality and Social Psychology, Behavior Research Methods, Industrial and Organizational Psychology, and Emotion, among other journals. David also created the apaTables R package.
\mainmatter
\hypertarget{introduction}{%
\chapter{Introduction}\label{introduction}}
\hypertarget{shift-return-somewhere}{%
\section{SHIFT RETURN SOMEWHERE}\label{shift-return-somewhere}}
Welcome! In this guide, we will teach you about statistics using the statistical software R with the interface provided by R Studio.
The purpose of this chapter is to provide you with a set of activities that get you up-and-running in R quickly so you get a sense of how it works. In later chapters we will revisit these same topics in more detail.
\hypertarget{a-focus-on-workflow}{%
\section{A focus on workflow}\label{a-focus-on-workflow}}
An important part of this guide is training you in a workflow that will avoid many problems that can occur when using R.
\hypertarget{r-works-with-plug-ins}{%
\section{R works with plug-ins}\label{r-works-with-plug-ins}}
R is a statistical language with many plug-ins called \textbf{packages} that you will use for analyses. You can think of R as being like your smartphone. To do things with your phone you need \textbf{an App} (R equivalent: a \emph{package}) from the App Store (R equivalent: \emph{CRAN}). Apps need to be \textbf{downloaded} (R equivalent: \emph{install.packages}) before you can use them. To use the app you need to \textbf{Open} it (R equivalent: the \emph{library command}). These similarities are illustrated in Table \ref{tab:appstore} below.
\begin{table}
\caption{\label{tab:appstore}R packages are similar to smart phone apps (Kim, 2018)}
\centering
\begin{tabular}[t]{ll}
\toprule
Smart Phone Terminology & R Terminology\\
\midrule
App & package\\
App Store & CRAN\\
Download App from App Store & install.packages("apaTables", dependencies = TRUE)\\
Open App & library("apaTables")\\
\bottomrule
\end{tabular}
\end{table}
\hypertarget{create-an-account-at-r-studio-cloud}{%
\section{Create an account at R Studio Cloud}\label{create-an-account-at-r-studio-cloud}}
\href{http://www.rstudio.cloud}{R Studio Cloud} accounts are free and required for this guide. Please go to the website and set up a new account.
\hypertarget{join-the-class-workspace}{%
\section{Join the class workspace}\label{join-the-class-workspace}}
To do the assignment required for this class you need to join the class workspace on R Studio Cloud\index{R Studio Cloud}. To do so:
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\item Log into R Studio Cloud (if you haven't already done so).
\item Go to your university email account and find the message with the subject ``R Studio Workspace Invitation''. In this message there is a link to the class R Studio Cloud workspace.
\item Click on the workspace link in the email or paste it into your web browser. You should see a screen like the one below in Figure \ref{fig:join}. Click on the Join button.
\end{enumerate}
\begin{figure}
\includegraphics[width=0.7\linewidth]{ch_introduction/images/screenshot_join}
\caption{Screen shot of workspace join message.}\label{fig:join}
\end{figure}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\setcounter{enumi}{3}
\tightlist
\item Then you should see the welcome message illustrated in Figure \ref{fig:welcome}. Above this message is the Projects menu option. Click on the word Projects.
\end{enumerate}
\begin{figure}
\includegraphics[width=0.7\linewidth]{ch_introduction/images/screenshot_welcome}
\caption{Screen shot of welcome message.}\label{fig:welcome}
\end{figure}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\setcounter{enumi}{4}
\tightlist
\item You should now see the First Project displayed as in Figure \ref{fig:assignment}. Click the Start button. You will then move to a view of R Studio.
\end{enumerate} \begin{figure} \includegraphics[width=0.7\linewidth]{ch_introduction/images/screenshot_assignment} \caption{Screen shot of starting first assignment}\label{fig:assignment} \end{figure} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \setcounter{enumi}{5} \tightlist \item In R Studio it is essential that you use projects to keep your files organized and in the same spot. For this course, when you start an assignment on R Studio Cloud, the project will already have been made for you. Later you will learn to make your own R Studio Projects. \end{enumerate} \hypertarget{exploring-the-r-studio-interface}{% \section{Exploring the R Studio Interface}\label{exploring-the-r-studio-interface}} Once you have opened (or created) a Project folder, you are presented with the R Studio interface. There are a few key elements to the user interface that are illustrated in Figure \ref{fig:interface}. In the lower right of the screen you can see a panel with several tabs (i.e., Files, Plots, Packages, etc.) that I will refer to as the Files pane. You look in this pane to see all the files associated with your project. On the left side of the screen is the Console, which is an interactive pane where you type and obtain results in real time. I've placed two large grey blocks on the screen with text to more clearly identify the Console and Files panes. Not shown in this figure is the Script panel where we can store our commands for later reuse. \begin{figure} \includegraphics[width=0.7\linewidth]{ch_introduction/images/screenshot_interface} \caption{R Studio interface}\label{fig:interface} \end{figure} \hypertarget{console-panel}{% \subsection{Console panel}\label{console-panel}} When you first start R, the Console panel is on the left side of the screen. Sometimes there are two panels on the left side (one above the other); if so, the Console panel is the lower one (and labeled accordingly). We can use R a bit like a calculator. Try typing the following into the Console window: 8 + 6 + 7 + 5. You can see that R immediately produced the result on a line preceded by two hashtags (\#\#). \begin{Shaded} \begin{Highlighting}[] \DecValTok{8} \OperatorTok{+}\StringTok{ }\DecValTok{6} \OperatorTok{+}\StringTok{ }\DecValTok{7} \OperatorTok{+}\StringTok{ }\DecValTok{5} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 26 \end{verbatim} We can also put the result into a variable to store it. Later we can use the print command to see that result. In the example below we add the numbers 3, 0, and 9 and store the result in the variable my\_sum. The text ``\textless-'' indicates that you are putting what is on the right side of the arrow into the variable on the left side of the arrow. You can think of a variable as a cup into which you can put different things. In this case, imagine a real-world cup with my\_sum written on the outside and inside the cup we have stored the sum of 3, 0, and 9 (i.e., 12). \begin{Shaded} \begin{Highlighting}[] \NormalTok{my_sum <-}\StringTok{ }\DecValTok{3} \OperatorTok{+}\StringTok{ }\DecValTok{0} \OperatorTok{+}\StringTok{ }\DecValTok{9} \end{Highlighting} \end{Shaded} We can inspect the contents of the my\_sum variable (i.e., my\_sum cup) with the print command: \begin{Shaded} \begin{Highlighting}[] \KeywordTok{print}\NormalTok{(my_sum)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 12 \end{verbatim} Variables are very useful in R. We will use them to store a single number, an entire data set, the results of an analysis, or anything else.
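As a small illustration of why variables are useful, a value stored in a variable can be reused in later calculations. The sketch below assumes the my\_sum variable created above; the name doubled\_sum is simply an illustrative choice.
\begin{verbatim}
# Reuse the value stored in my_sum (i.e., 12) in a new calculation
doubled_sum <- my_sum * 2
print(doubled_sum)
## [1] 24
\end{verbatim}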
\hypertarget{script-panel}{% \subsection{Script Panel}\label{script-panel}} Although you can use R with just the Console panel, it's a better idea to use scripts via the Script panel (not visible yet). Scripts are just text files with the commands you use stored in them. You can run a script (as you will see below) using the Run or Source buttons located in the top right of the Script panel. Scripts are valuable because if you need to run an analysis a second time you don't have to type the commands in a second time. You can run the script again and again without retyping your commands. More importantly though, the script provides a record of your analyses. A common problem in science is that after an article is published, the authors can't reproduce the numbers in the paper. You can read more about this important problem in a surprising article in the journal \href{https://molecularbrain.biomedcentral.com/articles/10.1186/s13041-020-0552-2}{Molecular Brain}. In this article an editor reports how a request for the data underlying articles resulted in the wrong data for 40 out of 41 papers. Long story short -- keep track of the data and scripts you use for your paper. As we will discuss in a later chapter, it's generally poor practice to manipulate, modify, or analyze your data using any menu-driven software because this approach does not provide a record of what you have done. \hypertarget{writing-your-first-script}{% \section{Writing your first script}\label{writing-your-first-script}} \hypertarget{create-the-script-file}{% \subsection{Create the script file}\label{create-the-script-file}} Create a script in your R Studio project by using the menu File \textgreater{} New File \textgreater{} R Script. Save the file with an appropriate name using the File menu. The file will be saved in your Project folder. A common, and good, convention for naming is to start all script names with the word ``script'' and separate words with an underscore. You might save this first script file with the name ``script\_my\_first\_one.R''. The advantage of beginning all script files with the word script is that when you look at your list of files alphabetically, all the script files will cluster together. Likewise, it's a good idea to save all data files such that they begin with ``data\_''. This way all the data files will cluster together in your directory view as well. You can see there is already a data file with this convention called ``data\_okcupid.csv''. As discussed previously, we are trying to instill an effective workflow as you learn R. Using a good naming convention (that is consistent with what others use) is part of the workflow. When you write your scripts it's a good idea to follow the \href{https://style.tidyverse.org}{tidyverse style guide} for script names, variable names, file names, and more. \hypertarget{add-a-comment-to-your-script}{% \subsection{Add a comment to your script}\label{add-a-comment-to-your-script}} In the previous section you created your first script. We begin by adding a comment to the script. A comment is something that will be read by humans rather than the computer/R. You make comments for other people who will read your code and need to understand what you have done. However, realize that you are also making comments for your future self, as illustrated in an \href{https://xkcd.com/1421/}{XKCD cartoon}. A good way to start every script is with a comment that includes the date of your script (or, even better, the date you installed your packages; more on this later).
Like smartphone apps, packages are updated regularly. Sometimes after a package is updated it will no longer work with an older script. Fortunately, the \href{https://cran.r-project.org/web/packages/checkpoint/index.html}{checkpoint package}\index{checkpoint} lets users roll back the clock and use older versions of packages. Adding a comment with the date of your script will help future users (including you) to use your script with the same versions of the packages used when you wrote the script. Dating your script is an important part of an effective and reproducible workflow. \begin{Shaded} \begin{Highlighting}[] \CommentTok{# Code written on: YYYY/MM/DD } \CommentTok{# By: John Smith} \end{Highlighting} \end{Shaded} Note that in the above comment I used the internationally accepted date format\index{date format} order Year/Month/Day. Some people use the mnemonic \emph{Your My Dream} to remember this order. Wikipedia provides more information about the \href{https://en.wikipedia.org/wiki/ISO_8601}{International Date Format ISO 8601}. Moving forward, I suggest you use comments to make your own personal notes in your own code as you write it. \hypertarget{background-about-the-tidyverse}{% \subsection{Background about the tidyverse}\label{background-about-the-tidyverse}} There are generally two broad ways of using R: the older way and the newer way. Using R the older way is referred to as using base R. A more modern approach to using R is the tidyverse\index{tidyverse}. The tidyverse represents a collection of packages that work together to give R a modern workflow. These packages do many things to help the data analyst (loading data, rearranging data, graphing, etc.). We will use the tidyverse approach to R in this guide. As noted, the tidyverse is a collection of packages. Each package adds new commands to R. The number of packages and, correspondingly, the number of new commands added to R by the tidyverse is large. Below is a list of the tidyverse packages: \begin{verbatim} ## [1] "broom" "cli" "crayon" "dbplyr" ## [5] "dplyr" "forcats" "ggplot2" "haven" ## [9] "hms" "httr" "jsonlite" "lubridate" ## [13] "magrittr" "modelr" "pillar" "purrr" ## [17] "readr" "readxl" "reprex" "rlang" ## [21] "rstudioapi" "rvest" "stringr" "tibble" ## [25] "tidyr" "xml2" "tidyverse" \end{verbatim} Before you can use a package it needs to be installed -- this is the same as downloading an app from the App Store. Normally, you can install a \textbf{single} package with the install.packages command. Previously, you needed to run an install.packages command for every package in the tidyverse as illustrated below (though we no longer use this approach). \begin{Shaded} \begin{Highlighting}[] \CommentTok{# The old way of installing the tidyverse packages} \CommentTok{# Like downloading apps from the app store} \KeywordTok{install.packages}\NormalTok{(}\StringTok{"broom"}\NormalTok{, }\DataTypeTok{dep =} \OtherTok{TRUE}\NormalTok{)} \KeywordTok{install.packages}\NormalTok{(}\StringTok{"cli"}\NormalTok{, }\DataTypeTok{dep =} \OtherTok{TRUE}\NormalTok{)} \KeywordTok{install.packages}\NormalTok{(}\StringTok{"ggplot2"}\NormalTok{, }\DataTypeTok{dep =} \OtherTok{TRUE}\NormalTok{)} \CommentTok{# etc} \end{Highlighting} \end{Shaded} Fortunately, the tidyverse packages can now be installed with a single install.packages command. Specifically, the install.packages command below will install all of the packages listed above. \textbf{Class note: For the ``First Lab'', I've done the install.packages for you.
So there is no need to use the install.packages command below in this first lab.} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{install.packages}\NormalTok{(}\StringTok{"tidyverse"}\NormalTok{, }\DataTypeTok{dep =} \OtherTok{TRUE}\NormalTok{)} \end{Highlighting} \end{Shaded} \hypertarget{add-librarytidyverse-to-your-script}{% \subsection{Add library(tidyverse) to your script}\label{add-librarytidyverse-to-your-script}} The tidyverse is now installed, so we need to activate it. We do that with the library command. Put the library line below at the top of your script file (below your comment): \begin{Shaded} \begin{Highlighting}[] \CommentTok{# Code written on: YYYY/MM/DD } \CommentTok{# By: John Smith} \KeywordTok{library}\NormalTok{(tidyverse)} \end{Highlighting} \end{Shaded} \hypertarget{activate-tidyverse-auto-complete-for-your-script}{% \subsection{Activate tidyverse auto-complete for your script}\label{activate-tidyverse-auto-complete-for-your-script}} Select the library(tidyverse) text with your mouse/track-pad so that it is highlighted. Then click the Run button in the upper right of the Script panel. Doing this ``runs'' the selected text. After you click the Run button you should see text like the following in the Console panel: \begin{verbatim} ## -- Attaching packages ------------------------------------------- tidyverse 1.3.0 -- \end{verbatim} \begin{verbatim} ## v ggplot2 3.3.1 v purrr 0.3.4 ## v tibble 3.0.1 v dplyr 1.0.0 ## v tidyr 1.1.0 v stringr 1.4.0 ## v readr 1.3.1 v forcats 0.5.0 \end{verbatim} \begin{verbatim} ## -- Conflicts ---------------------------------------------- tidyverse_conflicts() -- ## x dplyr::filter() masks stats::filter() ## x dplyr::lag() masks stats::lag() \end{verbatim} When you use library(tidyverse) to activate the tidyverse you activate the most commonly used subset of the tidyverse packages. In the output you see checkmarks beside the names of the tidyverse packages you have activated. By activating these packages you have added new commands to R that you will use. Sometimes these packages replace older versions of commands in R. The ``Conflicts'' section in the output shows you where the packages you activated replaced older R commands with newer R commands. You can activate the other tidyverse packages by running a library command for each package -- if needed. No need to do so now. Most importantly, running library(tidyverse) prior to entering the rest of your script allows R Studio to present auto-complete options when typing your text. Remember to start each script with the library(tidyverse) command and then Run it so you get the autocomplete options for the rest of the commands you enter. \hypertarget{loading-your-data}{% \section{Loading your data}\label{loading-your-data}} \hypertarget{use-read_csv-not-read.csv-to-open-files.}{% \subsection{Use read\_csv (not read.csv) to open files.}\label{use-read_csv-not-read.csv-to-open-files.}} If you inspect the Files pane on the right of the screen you see the \textbf{data\_okcupid.csv} data file in our project directory. We will load this data with the commands below. If you followed the steps above, you should have auto-complete for the tidyverse commands you type from now on -- in the current R session. Enter the command below into your script. As you start to type read\_csv you will likely be presented with an auto-complete option. You can use the arrow keys to move up and down the list of options to select the one you want - then press tab to select it.
Once your command looks like the one below, select the text and click on the ``Run'' button. \begin{Shaded} \begin{Highlighting}[] \NormalTok{okcupid_profiles <-}\StringTok{ }\KeywordTok{read_csv}\NormalTok{(}\DataTypeTok{file =} \StringTok{"data_okcupid.csv"}\NormalTok{)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Parsed with column specification: ## cols( ## age = col_double(), ## diet = col_character(), ## height = col_double(), ## pets = col_character(), ## sex = col_character(), ## status = col_character() ## ) \end{verbatim} The output indicates that you have loaded a data file and the type of data in each column. The sex column is of type col\_character, which indicates it contains text/letters. Most of the columns are of the type character. The age and height columns contain numbers and are correspondingly indicated to be of the type col\_double. The label col\_double indicates a column of numbers represented in R with \href{https://en.wikipedia.org/wiki/Double-precision_floating-point_format}{high precision}. There are other ways of representing numbers in R but this is the type we will see/use most often. \hypertarget{checking-out-your-data}{% \section{Checking out your data}\label{checking-out-your-data}} There are many ways of viewing the actual data you loaded. A few of these are illustrated now. \hypertarget{view-see-a-spreadsheet-view-of-your-data}{% \subsection{view(): See a spreadsheet view of your data}\label{view-see-a-spreadsheet-view-of-your-data}} You can inspect your data in a spreadsheet view by using the view command. Do NOT add this command to your script file -- EVER. Adding it to the script can cause substantial problems. Type this command in the Console. \begin{Shaded} \begin{Highlighting}[] \KeywordTok{view}\NormalTok{(okcupid_profiles)} \end{Highlighting} \end{Shaded} \hypertarget{print-see-you-data-in-the-console}{% \subsection{print(): See your data in the Console}\label{print-see-you-data-in-the-console}} You can inspect the first few rows of your data with the print() command. It is OK to add a print command to your script. Try the print() command below in the Console: \begin{Shaded} \begin{Highlighting}[] \KeywordTok{print}\NormalTok{(okcupid_profiles)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## # A tibble: 59,946 x 6 ## age diet height pets sex status ## <dbl> <chr> <dbl> <chr> <chr> <chr> ## 1 22 strictly an~ 75 likes dogs and l~ m single ## 2 35 mostly other 70 likes dogs and l~ m single ## 3 38 anything 68 has cats m availa~ ## 4 23 vegetarian 71 likes cats m single ## 5 29 <NA> 66 likes dogs and l~ m single ## 6 29 mostly anyt~ 67 likes cats m single ## 7 32 strictly an~ 65 likes dogs and l~ f single ## 8 31 mostly anyt~ 65 likes dogs and l~ f single ## 9 24 strictly an~ 67 likes dogs and l~ f single ## 10 37 mostly anyt~ 65 likes dogs and l~ m single ## # ... with 59,936 more rows \end{verbatim} \hypertarget{head-check-out-the-first-few-rows-of-data}{% \subsection{head(): Check out the first few rows of data}\label{head-check-out-the-first-few-rows-of-data}} You can inspect the first few rows of your data with the head() command.
Try the command below in the Console: \begin{Shaded} \begin{Highlighting}[] \KeywordTok{head}\NormalTok{(okcupid_profiles)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## # A tibble: 6 x 6 ## age diet height pets sex status ## <dbl> <chr> <dbl> <chr> <chr> <chr> ## 1 22 strictly any~ 75 likes dogs and l~ m single ## 2 35 mostly other 70 likes dogs and l~ m single ## 3 38 anything 68 has cats m availa~ ## 4 23 vegetarian 71 likes cats m single ## 5 29 <NA> 66 likes dogs and l~ m single ## 6 29 mostly anyth~ 67 likes cats m single \end{verbatim} You can be even more specific and indicate you only want the first three row of your data with the head() command. Try the command below in the Console: \begin{Shaded} \begin{Highlighting}[] \KeywordTok{head}\NormalTok{(okcupid_profiles, }\DecValTok{3}\NormalTok{)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## # A tibble: 3 x 6 ## age diet height pets sex status ## <dbl> <chr> <dbl> <chr> <chr> <chr> ## 1 22 strictly any~ 75 likes dogs and l~ m single ## 2 35 mostly other 70 likes dogs and l~ m single ## 3 38 anything 68 has cats m availa~ \end{verbatim} \hypertarget{tail-check-out-the-last-few-rows-of-data}{% \subsection{tail(): Check out the last few rows of data}\label{tail-check-out-the-last-few-rows-of-data}} You can inspect the last few rows of your data with the tail() command. Try the command below in the Console: \begin{Shaded} \begin{Highlighting}[] \KeywordTok{tail}\NormalTok{(okcupid_profiles)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## # A tibble: 6 x 6 ## age diet height pets sex status ## <dbl> <chr> <dbl> <chr> <chr> <chr> ## 1 31 <NA> 62 likes dogs f single ## 2 59 <NA> 62 has dogs f single ## 3 24 mostly anyt~ 72 likes dogs and lik~ m single ## 4 42 mostly anyt~ 71 <NA> m single ## 5 27 mostly anyt~ 73 likes dogs and lik~ m single ## 6 39 <NA> 68 likes dogs and lik~ m single \end{verbatim} You can be even more specific and indicate you only want the last three row of your data with the tail() command. Try the command below in the Console: \begin{Shaded} \begin{Highlighting}[] \KeywordTok{tail}\NormalTok{(okcupid_profiles, }\DecValTok{3}\NormalTok{)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## # A tibble: 3 x 6 ## age diet height pets sex status ## <dbl> <chr> <dbl> <chr> <chr> <chr> ## 1 42 mostly anyt~ 71 <NA> m single ## 2 27 mostly anyt~ 73 likes dogs and lik~ m single ## 3 39 <NA> 68 likes dogs and lik~ m single \end{verbatim} \hypertarget{summary-quick-summaries}{% \subsection{summary(): Quick summaries}\label{summary-quick-summaries}} You can a short summary of your data with the summary() command. Note that we will use the summary() command in many places in the guide. The output of the summary() command changes depending on what you give it - that is put inside the brackets. You can give the summary() command many things such as data, the results of a regression analysis, etc. Try the command below in the Console. You will see that summary() give the mean and median for each of the numeric variables (age and height). \begin{Shaded} \begin{Highlighting}[] \KeywordTok{summary}\NormalTok{(okcupid_profiles)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## age diet height ## Min. : 18.0 Length:59946 Min. : 1.0 ## 1st Qu.: 26.0 Class :character 1st Qu.:66.0 ## Median : 30.0 Mode :character Median :68.0 ## Mean : 32.3 Mean :68.3 ## 3rd Qu.: 37.0 3rd Qu.:71.0 ## Max. :110.0 Max. 
:95.0 ## NA's :3 ## pets sex status ## Length:59946 Length:59946 Length:59946 ## Class :character Class :character Class :character ## Mode :character Mode :character Mode :character ## ## ## ## \end{verbatim} \hypertarget{run-vs.-source-with-echo-vs.-source}{% \section{\texorpdfstring{Run \emph{vs.} Source with Echo \emph{vs.} Source}{Run vs. Source with Echo vs. Source}}\label{run-vs.-source-with-echo-vs.-source}} There are different ways of running commands in R. So far you have used two of these. You can enter them into the Console as we have done already. Or you can put them in your script, select the text, and click the Run button. In total, there are four ways of running commands. You can: \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \tightlist \item Console: Enter commands directly \item Script: Select the command(s) and press the Run button. \item Script: Source (Without Echo) \item Script: Source With Echo \end{enumerate} Two of these approaches involve using the Source button; see Figure \ref{fig:sourcebutton}. You bring up the options for the Source button, illustrated in this figure, by clicking on the small arrow to the right of the word Source. \begin{figure} \includegraphics[width=0.35\linewidth]{ch_introduction/images/screenshot_source} \caption{Source button options}\label{fig:sourcebutton} \end{figure} \hypertarget{run-select-text}{% \subsection{Run select text}\label{run-select-text}} The Run button will run the text you highlight and present the relevant output. You have used this command a fair amount already. I strongly suggest you ONLY use the Run button when testing a command to make sure it works, to debug a script, or to run library(tidyverse) as you start working on your script so that you get the autocomplete options. In general, you should always try to execute your R Scripts using the Source with Echo command (preceded by a Restart, see below). This ensures your script will work from beginning to end for you in the future and for others who attempt to use it. Using the Run button on an ad hoc basis can create output that is not reproducible. \hypertarget{source-without-echo}{% \subsection{Source (without Echo)}\label{source-without-echo}} Source (without Echo) is not designed for the typical analysis workflow. It is mostly helpful when you run simulations. When you run Source (without Echo) much of the output you would wish to read is suppressed. In general, avoid this option. If you use it, you often won't see what you want to see in the output. \hypertarget{source-with-echo}{% \subsection{Source with Echo}\label{source-with-echo}} The Source with Echo command runs all of the contents of a script and presents the output in the R console. This is the approach you should use to run your scripts in most cases. Prior to running Source with Echo (or just Source), it's always a good idea to restart R. This makes sure you clear the computer memory of any errors from any previous runs. So you should do the following EVERY time you run your script. \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \tightlist \item Use the menu item: \textbf{Session \textgreater{} Restart R} \item Click the down arrow beside the Source button, and click on Source With Echo \end{enumerate} This will clear potentially problematic previous state, run the script commands, and display the output in the Console. Moving forward we will use this approach for running scripts.
Once you have used Source wiht Echo once, you can just click the Source button and it will use Source with Echo automatically (without the need to use the pull down option for selecting Source with Echo). \begin{rmdcaution} \begin{rmdcaution} Using Restart R before you run a script, or R code in general, is a critical workflow tip. \end{rmdcaution} \end{rmdcaution} \hypertarget{trying-source-with-echo}{% \section{Trying Source with Echo}\label{trying-source-with-echo}} Put the head(), tail(), and summary() command we used previously into your script. Then save your script using using the File \textgreater{} Save menu. You script should appear as below. \begin{Shaded} \begin{Highlighting}[] \CommentTok{# Code written on: YYYY/MM/DD } \CommentTok{# By: John Smith} \KeywordTok{library}\NormalTok{(tidyverse)} \NormalTok{okcupid_profiles <-}\StringTok{ }\KeywordTok{read_csv}\NormalTok{(}\DataTypeTok{file =} \StringTok{"data_okcupid.csv"}\NormalTok{)} \KeywordTok{head}\NormalTok{(okcupid_profiles)} \KeywordTok{tail}\NormalTok{(okcupid_profiles)} \KeywordTok{summary}\NormalTok{(okcupid_profiles)} \end{Highlighting} \end{Shaded} Now do the following: \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \tightlist \item Use the menu item: \textbf{Session \textgreater{} Restart R} \item Click the down arrow beside the Source button, and click on Source With Echo \end{enumerate} You should see the output below: \begin{Shaded} \begin{Highlighting}[] \CommentTok{# Code written on: YYYY/MM/DD } \CommentTok{# By: John Smith} \KeywordTok{library}\NormalTok{(tidyverse)} \NormalTok{okcupid_profiles <-}\StringTok{ }\KeywordTok{read_csv}\NormalTok{(}\DataTypeTok{file =} \StringTok{"data_okcupid.csv"}\NormalTok{)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Parsed with column specification: ## cols( ## age = col_double(), ## diet = col_character(), ## height = col_double(), ## pets = col_character(), ## sex = col_character(), ## status = col_character() ## ) \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{head}\NormalTok{(okcupid_profiles)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## # A tibble: 6 x 6 ## age diet height pets sex status ## <dbl> <chr> <dbl> <chr> <chr> <chr> ## 1 22 strictly any~ 75 likes dogs and l~ m single ## 2 35 mostly other 70 likes dogs and l~ m single ## 3 38 anything 68 has cats m availa~ ## 4 23 vegetarian 71 likes cats m single ## 5 29 <NA> 66 likes dogs and l~ m single ## 6 29 mostly anyth~ 67 likes cats m single \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{tail}\NormalTok{(okcupid_profiles)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## # A tibble: 6 x 6 ## age diet height pets sex status ## <dbl> <chr> <dbl> <chr> <chr> <chr> ## 1 31 <NA> 62 likes dogs f single ## 2 59 <NA> 62 has dogs f single ## 3 24 mostly anyt~ 72 likes dogs and lik~ m single ## 4 42 mostly anyt~ 71 <NA> m single ## 5 27 mostly anyt~ 73 likes dogs and lik~ m single ## 6 39 <NA> 68 likes dogs and lik~ m single \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{summary}\NormalTok{(okcupid_profiles)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## age diet height ## Min. : 18.0 Length:59946 Min. : 1.0 ## 1st Qu.: 26.0 Class :character 1st Qu.:66.0 ## Median : 30.0 Mode :character Median :68.0 ## Mean : 32.3 Mean :68.3 ## 3rd Qu.: 37.0 3rd Qu.:71.0 ## Max. :110.0 Max. 
:95.0 ## NA's :3 ## pets sex status ## Length:59946 Length:59946 Length:59946 ## Class :character Class :character Class :character ## Mode :character Mode :character Mode :character ## ## ## ## \end{verbatim} Congratulations you just ran your first script! \hypertarget{a-few-key-points-about-r}{% \section{A Few Key Points About R}\label{a-few-key-points-about-r}} Sometimes you will need to send a command additional information. Moreover, that information often needs to be grouped together into a vector or a list before you can send it to the command. We'll learn more about doing so in the future but here is a quick over view of vectors and lists to provide a foundation for future chapters. \hypertarget{vector-of-numbers}{% \subsubsection{Vector of numbers}\label{vector-of-numbers}} We can create a vector of only numbers using the ``c'' function - which you can think of as being short for ``combine'' (or concatenate). In the commands below we create a vector of a few even numbers called ``even\_numbers''. \begin{Shaded} \begin{Highlighting}[] \NormalTok{even_numbers <-}\StringTok{ }\KeywordTok{c}\NormalTok{(}\DecValTok{2}\NormalTok{, }\DecValTok{4}\NormalTok{, }\DecValTok{6}\NormalTok{, }\DecValTok{8}\NormalTok{, }\DecValTok{10}\NormalTok{)} \end{Highlighting} \end{Shaded} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{print}\NormalTok{(even_numbers)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 2 4 6 8 10 \end{verbatim} We can obtain the second number in the vector using the following notation: \begin{Shaded} \begin{Highlighting}[] \KeywordTok{print}\NormalTok{(even_numbers[}\DecValTok{2}\NormalTok{])} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 4 \end{verbatim} \hypertarget{vector-of-characters}{% \subsubsection{Vector of characters}\label{vector-of-characters}} We can also create vectors using only characters. Note that I use \textbf{SHIFT RETURN} after each comma to move to the next line. \begin{Shaded} \begin{Highlighting}[] \NormalTok{favourite_things <-}\StringTok{ }\KeywordTok{c}\NormalTok{(}\StringTok{"copper kettles"}\NormalTok{,} \StringTok{"woolen mittens"}\NormalTok{,} \StringTok{"brown paper packages"}\NormalTok{)} \end{Highlighting} \end{Shaded} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{print}\NormalTok{(favourite_things)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] "copper kettles" "woolen mittens" ## [3] "brown paper packages" \end{verbatim} As before, can obtain the second item in the vector using the following notation: \begin{Shaded} \begin{Highlighting}[] \KeywordTok{print}\NormalTok{(favourite_things[}\DecValTok{2}\NormalTok{])} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] "woolen mittens" \end{verbatim} \hypertarget{lists}{% \subsection{Lists}\label{lists}} Lists are similar to vectors in that you can create them and access items by their numeric position. Vectors must be all characters or all numbers. Lists can be a mix of characters or numbers. Most importantly items in lists can be accessed by their label. Note that I use \textbf{SHIFT RETURN} after each comma to move to the next line in the code below. 
\begin{Shaded} \begin{Highlighting}[] \NormalTok{my_list <-}\StringTok{ }\KeywordTok{list}\NormalTok{(}\DataTypeTok{last_name =} \StringTok{"Smith"}\NormalTok{,} \DataTypeTok{first_name =} \StringTok{"John"}\NormalTok{,} \DataTypeTok{office_number =} \DecValTok{1913}\NormalTok{)} \KeywordTok{print}\NormalTok{(my_list)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## $last_name ## [1] "Smith" ## ## $first_name ## [1] "John" ## ## $office_number ## [1] 1913 \end{verbatim} You can access an item in a list by its numeric position using brackets: \begin{Shaded} \begin{Highlighting}[] \KeywordTok{print}\NormalTok{(my_list[}\DecValTok{2}\NormalTok{])} \end{Highlighting} \end{Shaded} \begin{verbatim} ## $first_name ## [1] "John" \end{verbatim} You can access an item in a list by its label/name using the dollar sign: \begin{Shaded} \begin{Highlighting}[] \KeywordTok{print}\NormalTok{(my_list}\OperatorTok{$}\NormalTok{last_name)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] "Smith" \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{print}\NormalTok{(my_list}\OperatorTok{$}\NormalTok{office_number)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 1913 \end{verbatim} \hypertarget{thats-it}{% \section{That's it!}\label{thats-it}} Congratulations! You've reached the end of the introduction to R. Take a break, have a cookie, and read some more about R tomorrow! \hypertarget{data-management-with-the-tidyverse}{% \chapter{Data Management with the Tidyverse}\label{data-management-with-the-tidyverse}} \hypertarget{required-packages}{% \section{Required Packages}\label{required-packages}} This chapter requires that the following packages are installed: \begin{longtable}[]{@{}l@{}} \toprule Required Packages\tabularnewline \midrule \endhead tidyverse\tabularnewline \bottomrule \end{longtable} \hypertarget{basic-tidyverse-commands}{% \section{Basic Tidyverse Commands}\label{basic-tidyverse-commands}} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{library}\NormalTok{(tidyverse)} \NormalTok{okcupid_profiles <-}\StringTok{ }\KeywordTok{read_csv}\NormalTok{(}\StringTok{"data_okcupid.csv"}\NormalTok{)} \end{Highlighting} \end{Shaded} You can see the first few rows of the data set using the print() command: \begin{Shaded} \begin{Highlighting}[] \KeywordTok{print}\NormalTok{(okcupid_profiles)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## # A tibble: 59,946 x 6 ## age diet height pets sex status ## <dbl> <chr> <dbl> <chr> <chr> <chr> ## 1 22 strictly an~ 75 likes dogs and l~ m single ## 2 35 mostly other 70 likes dogs and l~ m single ## 3 38 anything 68 has cats m availa~ ## 4 23 vegetarian 71 likes cats m single ## 5 29 <NA> 66 likes dogs and l~ m single ## 6 29 mostly anyt~ 67 likes cats m single ## 7 32 strictly an~ 65 likes dogs and l~ f single ## 8 31 mostly anyt~ 65 likes dogs and l~ f single ## 9 24 strictly an~ 67 likes dogs and l~ f single ## 10 37 mostly anyt~ 65 likes dogs and l~ m single ## # ... with 59,936 more rows \end{verbatim} But it's also helpful just to see a list of the columns in the data set with the glimpse() command: \begin{Shaded} \begin{Highlighting}[] \KeywordTok{glimpse}\NormalTok{(okcupid_profiles)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Rows: 59,946 ## Columns: 6 ## $ age <dbl> 22, 35, 38, 23, 29, 29, 32, 31, 24, 37, ... ## $ diet <chr> "strictly anything", "mostly other", "an... ## $ height <dbl> 75, 70, 68, 71, 66, 67, 65, 65, 67, 65, ... ## $ pets <chr> "likes dogs and likes cats", "likes dogs...
## $ sex <chr> "m", "m", "m", "m", "m", "m", "f", "f", ... ## $ status <chr> "single", "single", "available", "single... \end{verbatim} \hypertarget{select}{% \subsection{select()}\label{select}} The select() command allows you to obtain a subset of the columns in your data. The command below can be used to obtain the age and height columns. You can read the command as: take the okcupid\_profiles data and then select the age and height columns. You can see that this prints out the data with just the age and height columns. \begin{Shaded} \begin{Highlighting}[] \NormalTok{okcupid_profiles }\OperatorTok{%>%}\StringTok{ }\KeywordTok{select}\NormalTok{(age, height)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## # A tibble: 59,946 x 2 ## age height ## <dbl> <dbl> ## 1 22 75 ## 2 35 70 ## 3 38 68 ## 4 23 71 ## 5 29 66 ## 6 29 67 ## 7 32 65 ## 8 31 65 ## 9 24 67 ## 10 37 65 ## # ... with 59,936 more rows \end{verbatim} Of course, it's usually of little help to just print the subset of the data. It's better to store it in a new data set. In the command below we store the resulting data set in a data set called new\_data. \begin{Shaded} \begin{Highlighting}[] \NormalTok{new_data <-}\StringTok{ }\NormalTok{okcupid_profiles }\OperatorTok{%>%}\StringTok{ }\KeywordTok{select}\NormalTok{(age, height)} \end{Highlighting} \end{Shaded} The glimpse() command shows us that only the age and height columns are in new\_data. \begin{Shaded} \begin{Highlighting}[] \KeywordTok{glimpse}\NormalTok{(new_data)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Rows: 59,946 ## Columns: 2 ## $ age <dbl> 22, 35, 38, 23, 29, 29, 32, 31, 24, 37, ... ## $ height <dbl> 75, 70, 68, 71, 66, 67, 65, 65, 67, 65, ... \end{verbatim} In the above example we indicated the columns we wanted to retain from the okcupid\_profiles data in the select() command. However, we can also indicate the columns we want to drop from the data set by using a minus sign (-) in front of the columns we specify in the select() command. \begin{Shaded} \begin{Highlighting}[] \NormalTok{new_data <-}\StringTok{ }\NormalTok{okcupid_profiles }\OperatorTok{%>%}\StringTok{ }\KeywordTok{select}\NormalTok{(}\OperatorTok{-}\NormalTok{age, }\OperatorTok{-}\NormalTok{height)} \end{Highlighting} \end{Shaded} The glimpse() command shows us that we kept all the columns except the age and height columns when we created new\_data. \begin{Shaded} \begin{Highlighting}[] \KeywordTok{glimpse}\NormalTok{(new_data)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Rows: 59,946 ## Columns: 4 ## $ diet <chr> "strictly anything", "mostly other", "an... ## $ pets <chr> "likes dogs and likes cats", "likes dogs... ## $ sex <chr> "m", "m", "m", "m", "m", "m", "f", "f", ... ## $ status <chr> "single", "single", "available", "single... \end{verbatim} \hypertarget{summarise}{% \subsection{summarise()}\label{summarise}} The summarise() command can be used to generate descriptive statistics for a specified column. You can easily calculate column descriptives using commands such as mean(), sd(), min(), and max(), among others. In the example below we calculate the mean for the age column. To move to the next line while typing this command press SHIFT and ENTER (or RETURN) at the same time. In the code below, mean(age, na.rm = TRUE) indicates to R that it should calculate the mean of the age column. The na.rm indicates how missing values should be handled. The na stands for not available; in R missing values are classified as Not Available or NA.
The rm stands for remove. Consequently, na.rm is asking whether we should remove missing values when calculating the mean. The TRUE indicates that yes, missing values should be removed when calculating the mean. The result of this calculation is placed into a variable labelled age\_mean, though we could have used any label we wanted instead of age\_mean. We see that the mean of the age column is, with rounding, 32.3. \begin{Shaded} \begin{Highlighting}[] \NormalTok{okcupid_profiles }\OperatorTok{%>%}\StringTok{ } \StringTok{ }\KeywordTok{summarise}\NormalTok{(}\DataTypeTok{age_mean =} \KeywordTok{mean}\NormalTok{(age, }\DataTypeTok{na.rm =} \OtherTok{TRUE}\NormalTok{))} \end{Highlighting} \end{Shaded} \begin{verbatim} ## # A tibble: 1 x 1 ## age_mean ## <dbl> ## 1 32.3 \end{verbatim} More than one calculation can occur in the same summarise() command. You can easily add the calculation for the standard deviation with the sd() command. \begin{Shaded} \begin{Highlighting}[] \NormalTok{okcupid_profiles }\OperatorTok{%>%}\StringTok{ } \StringTok{ }\KeywordTok{summarise}\NormalTok{(}\DataTypeTok{age_mean =} \KeywordTok{mean}\NormalTok{(age, }\DataTypeTok{na.rm =} \OtherTok{TRUE}\NormalTok{),} \DataTypeTok{age_sd =} \KeywordTok{sd}\NormalTok{(age, }\DataTypeTok{na.rm =} \OtherTok{TRUE}\NormalTok{))} \end{Highlighting} \end{Shaded} \begin{verbatim} ## # A tibble: 1 x 2 ## age_mean age_sd ## <dbl> <dbl> ## 1 32.3 9.45 \end{verbatim} We can use the n() command to also display the number of participants in the calculation: \begin{Shaded} \begin{Highlighting}[] \NormalTok{okcupid_profiles }\OperatorTok{%>%}\StringTok{ } \StringTok{ }\KeywordTok{summarise}\NormalTok{(}\DataTypeTok{age_mean =} \KeywordTok{mean}\NormalTok{(age, }\DataTypeTok{na.rm =} \OtherTok{TRUE}\NormalTok{),} \DataTypeTok{age_sd =} \KeywordTok{sd}\NormalTok{(age, }\DataTypeTok{na.rm =} \OtherTok{TRUE}\NormalTok{),} \DataTypeTok{n =} \KeywordTok{n}\NormalTok{())} \end{Highlighting} \end{Shaded} \begin{verbatim} ## # A tibble: 1 x 3 ## age_mean age_sd n ## <dbl> <dbl> <int> ## 1 32.3 9.45 59946 \end{verbatim} \hypertarget{filter}{% \subsection{filter()}\label{filter}} The filter() command allows you to obtain a subset of the rows in your data. In the example below we create a new data set with just the males from the original data set. Notice the structure of the original data set below in the glimpse() output. There is a column called sex that uses m and f to indicate male and female, respectively. Also notice that there are 59946 rows in the okcupid\_profiles data set. \begin{Shaded} \begin{Highlighting}[] \KeywordTok{glimpse}\NormalTok{(okcupid_profiles)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Rows: 59,946 ## Columns: 6 ## $ age <dbl> 22, 35, 38, 23, 29, 29, 32, 31, 24, 37, ... ## $ diet <chr> "strictly anything", "mostly other", "an... ## $ height <dbl> 75, 70, 68, 71, 66, 67, 65, 65, 67, 65, ... ## $ pets <chr> "likes dogs and likes cats", "likes dogs... ## $ sex <chr> "m", "m", "m", "m", "m", "m", "f", "f", ... ## $ status <chr> "single", "single", "available", "single... \end{verbatim} We use the filter command to select a subset of the rows based on the contents of any column. In this case, the sex column. Notice the use of the double equals sign to indicate ``equal to''. The reason a double equals sign is used here is to distinguish it from the use of the equals sign in the summarise command above where it was used to indicate ``assign to'' age\_mean after the mean was calculated.
A single equals sign indicates ``assign to'' whereas a double equals sign indicates ``is equal to''. \begin{Shaded} \begin{Highlighting}[] \NormalTok{okcupid_males <-}\StringTok{ }\NormalTok{okcupid_profiles }\OperatorTok{%>%} \StringTok{ }\KeywordTok{filter}\NormalTok{(sex }\OperatorTok{==}\StringTok{ "m"}\NormalTok{)} \end{Highlighting} \end{Shaded} We use glimpse() to inspect this all-male data set. Notice that only the letter m is in the sex column. Also notice that there are 35829 rows in the okcupid\_males data set; fewer people because only the males are in this data set. \begin{Shaded} \begin{Highlighting}[] \KeywordTok{glimpse}\NormalTok{(okcupid_males)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Rows: 35,829 ## Columns: 6 ## $ age <dbl> 22, 35, 38, 23, 29, 29, 37, 35, 28, 24, ... ## $ diet <chr> "strictly anything", "mostly other", "an... ## $ height <dbl> 75, 70, 68, 71, 66, 67, 65, 70, 72, 72, ... ## $ pets <chr> "likes dogs and likes cats", "likes dogs... ## $ sex <chr> "m", "m", "m", "m", "m", "m", "m", "m", ... ## $ status <chr> "single", "single", "available", "single... \end{verbatim} The filter command can be combined with the summarise command to get the descriptive statistics for males without the hassle of creating a new data set. This is done using the \%\textgreater\% ``and then'' operator. \begin{Shaded} \begin{Highlighting}[] \NormalTok{okcupid_profiles }\OperatorTok{%>%} \StringTok{ }\KeywordTok{filter}\NormalTok{(sex }\OperatorTok{==}\StringTok{ "m"}\NormalTok{) }\OperatorTok{%>%} \StringTok{ }\KeywordTok{summarise}\NormalTok{(}\DataTypeTok{age_mean =} \KeywordTok{mean}\NormalTok{(age, }\DataTypeTok{na.rm =} \OtherTok{TRUE}\NormalTok{),} \DataTypeTok{age_sd =} \KeywordTok{sd}\NormalTok{(age, }\DataTypeTok{na.rm =} \OtherTok{TRUE}\NormalTok{),} \DataTypeTok{n =} \KeywordTok{n}\NormalTok{())} \end{Highlighting} \end{Shaded} \begin{verbatim} ## # A tibble: 1 x 3 ## age_mean age_sd n ## <dbl> <dbl> <int> ## 1 32.0 9.03 35829 \end{verbatim} We see that for the 35829 males the mean age is 32.0 and the standard deviation is 9.0. Likewise, we can obtain the descriptive statistics for females with only a slight modification, changing m to f in the filter command: \begin{Shaded} \begin{Highlighting}[] \NormalTok{okcupid_profiles }\OperatorTok{%>%} \StringTok{ }\KeywordTok{filter}\NormalTok{(sex }\OperatorTok{==}\StringTok{ "f"}\NormalTok{) }\OperatorTok{%>%} \StringTok{ }\KeywordTok{summarise}\NormalTok{(}\DataTypeTok{age_mean =} \KeywordTok{mean}\NormalTok{(age, }\DataTypeTok{na.rm =} \OtherTok{TRUE}\NormalTok{),} \DataTypeTok{age_sd =} \KeywordTok{sd}\NormalTok{(age, }\DataTypeTok{na.rm =} \OtherTok{TRUE}\NormalTok{),} \DataTypeTok{n =} \KeywordTok{n}\NormalTok{())} \end{Highlighting} \end{Shaded} \begin{verbatim} ## # A tibble: 1 x 3 ## age_mean age_sd n ## <dbl> <dbl> <int> ## 1 32.8 10.0 24117 \end{verbatim} We see that for the 24117 females the mean age is 32.8 and the standard deviation is 10.0. \hypertarget{group_by}{% \subsection{group\_by()}\label{group_by}} The process we used with the filter command would quickly become onerous if we had many subgroups for a column. Consequently, it's often better to use the group\_by() command to calculate descriptives for the levels of a variable. By telling the computer to group\_by() sex, the summarise command is run separately for every level of sex (i.e., m and f).
\begin{Shaded} \begin{Highlighting}[] \NormalTok{okcupid_profiles }\OperatorTok{%>%} \StringTok{ }\KeywordTok{group_by}\NormalTok{(sex) }\OperatorTok{%>%} \StringTok{ }\KeywordTok{summarise}\NormalTok{(}\DataTypeTok{age_mean =} \KeywordTok{mean}\NormalTok{(age, }\DataTypeTok{na.rm =} \OtherTok{TRUE}\NormalTok{),} \DataTypeTok{age_sd =} \KeywordTok{sd}\NormalTok{(age, }\DataTypeTok{na.rm =} \OtherTok{TRUE}\NormalTok{),} \DataTypeTok{n =} \KeywordTok{n}\NormalTok{()) } \end{Highlighting} \end{Shaded} \begin{verbatim} ## `summarise()` ungrouping output (override with `.groups` argument) \end{verbatim} \begin{verbatim} ## # A tibble: 2 x 4 ## sex age_mean age_sd n ## <chr> <dbl> <dbl> <int> ## 1 f 32.8 10.0 24117 ## 2 m 32.0 9.03 35829 \end{verbatim} Fortunately, it's possible to use more than one grouping variable with the group\_by() command. In the code below we group by sex and status (i.e., dating status). \begin{Shaded} \begin{Highlighting}[] \NormalTok{okcupid_profiles }\OperatorTok{%>%} \StringTok{ }\KeywordTok{group_by}\NormalTok{(sex, status) }\OperatorTok{%>%} \StringTok{ }\KeywordTok{summarise}\NormalTok{(}\DataTypeTok{age_mean =} \KeywordTok{mean}\NormalTok{(age, }\DataTypeTok{na.rm =} \OtherTok{TRUE}\NormalTok{),} \DataTypeTok{age_sd =} \KeywordTok{sd}\NormalTok{(age, }\DataTypeTok{na.rm =} \OtherTok{TRUE}\NormalTok{),} \DataTypeTok{n =} \KeywordTok{n}\NormalTok{()) } \end{Highlighting} \end{Shaded} \begin{verbatim} ## `summarise()` regrouping output by 'sex' (override with `.groups` argument) \end{verbatim} \begin{verbatim} ## # A tibble: 10 x 5 ## # Groups: sex [2] ## sex status age_mean age_sd n ## <chr> <chr> <dbl> <dbl> <int> ## 1 f available 32.2 8.54 656 ## 2 f married 33.7 8.13 135 ## 3 f seeing someone 28.1 6.44 1003 ## 4 f single 33.0 10.2 22319 ## 5 f unknown 27.8 5.91 4 ## 6 m available 34.8 9.40 1209 ## 7 m married 38.7 10.1 175 ## 8 m seeing someone 30.8 7.06 1061 ## 9 m single 31.9 9.04 33378 ## 10 m unknown 40.7 8.87 6 \end{verbatim} The resulting output provides the mean and standard deviation of age for every combination of sex and dating status. The first five rows provide output for females at every level of dating status whereas the subsequent five rows provide output for males at every level of dating status. \hypertarget{mutate}{% \subsection{mutate()}\label{mutate}} The mutate() command can be used to calculate a new column in the data set, typically by transforming or combining existing columns. \hypertarget{advanced-tidyverse-commands}{% \section{Advanced Tidyverse Commands}\label{advanced-tidyverse-commands}} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{library}\NormalTok{(tidyverse)} \NormalTok{data_exp <-}\StringTok{ }\KeywordTok{read_csv}\NormalTok{(}\StringTok{"data_experiment.csv"}\NormalTok{)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Parsed with column specification: ## cols( ## id = col_double(), ## sex = col_character(), ## time1_vomit = col_double(), ## time1_aggression = col_double(), ## time2_vomit = col_double(), ## time2_aggression = col_double() ## ) \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{glimpse}\NormalTok{(data_exp)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Rows: 6 ## Columns: 6 ## $ id <dbl> 1, 2, 3, 4, 5, 6 ## $ sex <chr> "male", "female", "male", "fem...
## $ time1_vomit <dbl> 3, 2, 0, 3, 2, 1 ## $ time1_aggression <dbl> 5, 6, 4, 7, 3, 8 ## $ time2_vomit <dbl> 2, 1, 1, 2, 1, 2 ## $ time2_aggression <dbl> 6, 7, 6, 7, 5, 8 \end{verbatim} \hypertarget{select-1}{% \subsection{select()}\label{select-1}} \hypertarget{select-using-where}{% \subsubsection{select() using where()}\label{select-using-where}} \begin{Shaded} \begin{Highlighting}[] \NormalTok{data_numeric_columns <-}\StringTok{ }\NormalTok{data_exp }\OperatorTok{%>%}\StringTok{ } \StringTok{ }\KeywordTok{select}\NormalTok{(}\KeywordTok{where}\NormalTok{(is.numeric))} \end{Highlighting} \end{Shaded} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{glimpse}\NormalTok{(data_numeric_columns)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Rows: 6 ## Columns: 5 ## $ id <dbl> 1, 2, 3, 4, 5, 6 ## $ time1_vomit <dbl> 3, 2, 0, 3, 2, 1 ## $ time1_aggression <dbl> 5, 6, 4, 7, 3, 8 ## $ time2_vomit <dbl> 2, 1, 1, 2, 1, 2 ## $ time2_aggression <dbl> 6, 7, 6, 7, 5, 8 \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \NormalTok{data_character_columns <-}\StringTok{ }\NormalTok{data_exp }\OperatorTok{%>%}\StringTok{ } \StringTok{ }\KeywordTok{select}\NormalTok{(}\KeywordTok{where}\NormalTok{(is.character))} \end{Highlighting} \end{Shaded} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{glimpse}\NormalTok{(data_character_columns)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Rows: 6 ## Columns: 1 ## $ sex <chr> "male", "female", "male", "female", "male",... \end{verbatim} \hypertarget{select-using-starts_with}{% \subsubsection{select() using starts\_with()}\label{select-using-starts_with}} \begin{Shaded} \begin{Highlighting}[] \NormalTok{data_time1 <-}\StringTok{ }\NormalTok{data_exp }\OperatorTok{%>%}\StringTok{ } \StringTok{ }\KeywordTok{select}\NormalTok{(}\KeywordTok{starts_with}\NormalTok{(}\StringTok{"time1_"}\NormalTok{))} \end{Highlighting} \end{Shaded} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{glimpse}\NormalTok{(data_time1)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Rows: 6 ## Columns: 2 ## $ time1_vomit <dbl> 3, 2, 0, 3, 2, 1 ## $ time1_aggression <dbl> 5, 6, 4, 7, 3, 8 \end{verbatim} \hypertarget{select-using-starts_with-1}{% \subsubsection{select() using ends\_with()}\label{select-using-starts_with-1}} \begin{Shaded} \begin{Highlighting}[] \NormalTok{data_aggression <-}\StringTok{ }\NormalTok{data_exp }\OperatorTok{%>%}\StringTok{ } \StringTok{ }\KeywordTok{select}\NormalTok{(}\KeywordTok{ends_with}\NormalTok{(}\StringTok{"_aggression"}\NormalTok{))} \end{Highlighting} \end{Shaded} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{glimpse}\NormalTok{(data_aggression)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Rows: 6 ## Columns: 2 ## $ time1_aggression <dbl> 5, 6, 4, 7, 3, 8 ## $ time2_aggression <dbl> 6, 7, 6, 7, 5, 8 \end{verbatim} \hypertarget{select-using-matches}{% \subsubsection{select() using matches()}\label{select-using-matches}} It's also possible to use \emph{regex} (regular expressions) to select columns. Regex is a powerful way of indicating search/matching requirements for text - in this case the text of column names. You can learn about regex at \href{https://regexone.com}{RegexOne} and test your regex at \href{https://regex101.com}{Regex101}. Ideally though, as we discuss in the next chapter, you can use naming conventions that are sufficiently well considered that you need regex only rarely, if at all. The reason for this is that regex can be challenging to use; a brief sketch is shown below.
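For completeness, here is a minimal sketch of select() with matches(), using the data\_exp columns from above; the regular expression and the name data\_times are illustrative choices rather than part of the original lab.
\begin{verbatim}
# Select every column whose name matches the regular expression
# "^time[12]_", i.e., names that start with time1_ or time2_
data_times <- data_exp %>%
  select(matches("^time[12]_"))

glimpse(data_times)
\end{verbatim}
In this small data set the result is the same as selecting the four measurement columns by hand; matches() simply becomes more useful as the number of columns grows.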
As Twitter user @ThatJenPerson noted ``Regex is like tequila: use it to try to solve a problem and now you have two problems.'' Nonetheless, at one or two points in the future we will use regex to solve a problem (but not tequila). \hypertarget{making-your-data-ready-for-analysis}{% \chapter{Making your data ready for analysis}\label{making-your-data-ready-for-analysis}} \hypertarget{required-packages-1}{% \section{Required Packages}\label{required-packages-1}} This chapter requires that the following packages are installed: \begin{longtable}[]{@{}l@{}} \toprule Required Packages\tabularnewline \midrule \endhead apaTables\tabularnewline janitor\tabularnewline psych\tabularnewline tidyverse\tabularnewline \bottomrule \end{longtable} \textbf{Important Note:} you should NOT use library(psych) at any point. There are major conflicts between the psych package and the tidyverse. We will access the psych package commands by preceding each command with psych:: instead of using library(psych). \hypertarget{objective}{% \section{Objective}\label{objective}} \hypertarget{context}{% \section{Context}\label{context}} Due to a number of high-profile failures to replicate study results \citep{cos2015}, it's become increasingly clear that there is a general crisis of confidence in many areas of science \citep{baker2016}. Statistical (and other) explanations have been offered \citep{simmons2011} for why it's hard to replicate results across different sets of data. However, scientists are also finding it challenging to recreate the numbers in their papers using their own data. Indeed, the editor of Molecular Brain asked authors to submit the data used to create the numbers in published papers and found that the wrong data was submitted for 40 out of 41 papers \citep{miyakawa2020}. Consequently, some researchers have suggested that it is critical to distinguish between replication and reproducibility \citep{patil2019}. Replication refers to trying to obtain the same result from a different data set. Reproducibility refers to trying to obtain the same results from the same data set. It may seem that reproducibility should be a given - but it's not. Correspondingly, there is a trend for journals and authors to adopt Transparency and Openness Promotion (TOP) \href{https://www.cos.io/our-services/top-guidelines}{guidelines}. These guidelines involve such things as making your materials, data, code, and analysis scripts available on public repositories so anyone can check your data. A new open science journal rating system has even emerged called the \href{https://topfactor.org}{TOP Factor}. The idea is not that open science articles are more trustworthy than other types of articles -- the idea is that trust doesn't play a role. Anyone can inspect the data using the scripts and data provided by authors. It's really just the same as making your science available for auditing the way financial records can be audited. The problems reported at Molecular Brain (doubtless common to many journals) are avoided with open science - because the data and scripts needed to reproduce the numbers in the articles are uploaded prior to publication. The TOP open science guidelines have made an impact and some newer journals, such as Meta-Psychology, have fully embraced open science. Figure \ref{fig:metapsychology} shows the header from an \href{https://open.lnu.se/index.php/metapsychology/article/view/1630/2266}{article} in Meta-Psychology that clearly delineates the open science attributes of the article.
Take note that the header even specifies who checked that the analyses in the article were reproducible from the scripts and data made publicly available by the authors. \begin{figure} \includegraphics[width=1\linewidth]{ch_score_items/images/screenshot_metapsychology} \caption{Open science in an article header}\label{fig:metapsychology} \end{figure} In Canada, the majority of university research is funded by the Federal Government's Tri-Agency (i.e., NSERC, SSHRC, CIHR). The agency has a new draft \href{https://www.ic.gc.ca/eic/site/063.nsf/eng/h_83F7624E.html}{Data Management Policy} in which they state that ``\emph{The agencies believe that research data collected with the use of public funds belong, to the fullest extent possible, in the public domain and available for reuse by others.}'' This perspective of the funding agency differs from that of some researchers who incorrectly believe ``they own their data''. In Canada at least, the government makes it clear that taxpayers fund the research so the data is public property. Additionally, the Tri-Agency Data Management policy clearly indicates the responsibilities of funded researchers: ``Responsibilities of researchers include: \begin{itemize} \tightlist \item incorporating data management best practices into their research; \item developing data management plans to guide the responsible collection, formatting, preservation and sharing of their data throughout the entire lifecycle of a research project and beyond; \item following the requirements of applicable institutional and/or funding agency policies and professional or disciplinary standards; \item acknowledging and citing datasets that contribute to their research; and \item staying abreast of standards and expectations of their disciplinary community.'' \end{itemize} As a result of this perspective on data, it's important that you think about structuring your data for reuse by yourself and others before you collect it. Toward this end, you will see that documentation of your data file via data codebooks is critical. \hypertarget{begin-with-the-end-in-mind}{% \section{Begin with the end in mind}\label{begin-with-the-end-in-mind}} In this chapter we will walk you through the steps from data collection and data entry to loading raw data and creating analytic data via preprocessing scripts. These steps are outlined in Figure \ref{fig:pipeline}. This figure makes a clear distinction between raw data and analytic data. Raw data refers to the data as you entered it into a spreadsheet or received it from survey software. Analytic data is the data that has been structured and processed so that it is ready for analysis. This pre-processing could include such things as identifying categorical variables to the computer, combining multiple items measuring the same thing into scale scores, among other tasks. It's critical that you don't think of analysis of your results as being completely removed from the data collection and data entry choices you make. Poor choices at the data collection and data entry stage can make your life substantially more complicated when it comes time to write the preprocessing script that will convert your raw data to analytic data. The mantra of this chapter is \emph{begin with the end in mind}. \begin{figure} \includegraphics[width=0.85\linewidth]{ch_enter_load/images/pipeline} \caption{Data science pipeline by Roger Peng.}\label{fig:pipeline} \end{figure} It's difficult to begin with the end in mind when you haven't read later chapters.
So here we will provide you with some general thoughts on different approaches to structuring data files and the naming conventions you use when creating those data files.

\hypertarget{tidy-data-comparing-wide-vs-long-formats}{%
\subsection{Tidy data: Comparing wide vs long formats}\label{tidy-data-comparing-wide-vs-long-formats}}

When conducting many types of analyses it is necessary to have data in a format called tidy data \citep{tidy-data}. \href{https://cran.r-project.org/web/packages/tidyr/vignettes/tidy-data.html}{Tidy data} as defined by Hadley Wickham involves (among other requirements) that:

\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
  Each variable forms a column.
\item
  Each observation forms a row.
\end{enumerate}

The tidy data format can be initially challenging for some researchers to understand because it is based on thinking about and structuring data in terms of observations/measurements instead of participants. In this section we will describe common approaches to entering animal and human participant data and how they can be used while keeping the tidy data requirements in mind. It's not essential that data be entered in a tidy data format, but it is essential that you enter data in a way that makes it easy to later convert the data to a tidy data format.

When dealing with animal or human participant data it's common to enter data into a spreadsheet. Each row of the spreadsheet is typically used to represent a single participant and each column of the spreadsheet is used to represent a variable.

\textbf{Between participant data}. Consider Table \ref{tab:betweenex}, which illustrates between participant data for six human participants running 5 kilometers. The first column is id, which indicates there are six unique participants and provides an identification number for each of them. The id is a variable and there is one observation per row -- so the id column conforms to the tidy data specification. The second column is sex, which is a variable, and there is one observation per row, so sex also conforms to the tidy data specification. Finally, the last column is elapsedtime, which is a variable with one observation per row -- also conforming to the tidy data specification. Thus, single occasion between subject data like this conforms to the tidy data specification. There is usually nothing you need to do to convert between participant data (or cross-sectional data) to a tidy data format.

\begin{table}
\caption{\label{tab:betweenex}Between participant data entered one row per participant}
\centering
\begin{tabular}[t]{rlr}
\toprule
id & sex & elapsedtime\\
\midrule
1 & male & 40\\
2 & female & 35\\
3 & male & 38\\
4 & female & 33\\
5 & male & 42\\
6 & female & 36\\
\bottomrule
\end{tabular}
\end{table}

\textbf{Within participant data}. Consider Table \ref{tab:withinex}, which illustrates within participant data for six human participants running 5 kilometers -- but on three different occasions. The first column is id, which indicates there are six unique participants and provides an identification number for each of them. The id is a variable and there is one observation per row -- so the id column conforms to the tidy data specification. The second column is sex, which is a variable, and there is one observation per row, so sex also conforms to the tidy data specification. Next, there are three different columns (march, may, july) representing the levels of a single variable.
That is, the within subject variable is occasion and the levels of that variable are march, may, and july. The march column contains the times for participants in March. The may column contains the times for participants in May. The july column contains the times for participants in July. These three columns are not in a tidy data format.

\begin{table}
\caption{\label{tab:withinex}Within participant data entered one row per participant}
\centering
\begin{tabular}[t]{rlrrr}
\toprule
id & sex & march & may & july\\
\midrule
1 & male & 40 & 37 & 35\\
2 & female & 35 & 32 & 30\\
3 & male & 38 & 35 & 33\\
4 & female & 33 & 30 & 28\\
5 & male & 42 & 39 & 37\\
6 & female & 36 & 33 & 31\\
\bottomrule
\end{tabular}
\end{table}

\begin{table}
\caption{\label{tab:withintidyex}A tidy data version of the within participant data}
\centering
\begin{tabular}[t]{rllr}
\toprule
id & sex & occasion & elapsedtime\\
\midrule
1 & male & march & 40\\
1 & male & may & 37\\
1 & male & july & 35\\
\addlinespace
2 & female & march & 35\\
2 & female & may & 32\\
2 & female & july & 30\\
\addlinespace
3 & male & march & 38\\
3 & male & may & 35\\
3 & male & july & 33\\
\addlinespace
4 & female & march & 33\\
4 & female & may & 30\\
4 & female & july & 28\\
\addlinespace
5 & male & march & 42\\
5 & male & may & 39\\
5 & male & july & 37\\
\addlinespace
6 & female & march & 36\\
6 & female & may & 33\\
6 & female & july & 31\\
\bottomrule
\end{tabular}
\end{table}

The problem with the format of the data in Table \ref{tab:withinex} is that march, may, and july are levels of a single variable, occasion, that is not represented in the data. Nowhere in Table \ref{tab:withinex} can you see the variable occasion. This single variable is spread over three columns -- a very confusing situation. Moreover, due to the way the columns are labelled it's not clear what is being measured. Nowhere in Table \ref{tab:withinex} can you see the variable elapsedtime. Thus, with the format used in Table \ref{tab:withinex} you don't know what the predictor (occasion) is, nor do you know what the dependent variable (elapsedtime) is. A major problem with entering data in this format is therefore that there are hidden variables in the data and you need insider knowledge to know what the columns represent. That said, this is not necessarily a terrible way to enter your data as long as you have all of this missing information documented in a data code book.
\begin{longtable}[]{@{}ll@{}}
\toprule
\begin{minipage}[b]{0.52\columnwidth}\raggedright
Disadvantages one row per participant\strut
\end{minipage} & \begin{minipage}[b]{0.42\columnwidth}\raggedright
Advantages one row per participant\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.52\columnwidth}\raggedright
1) Predictor variable (\emph{occasion}) is hidden and spread over multiple columns\strut
\end{minipage} & \begin{minipage}[t]{0.42\columnwidth}\raggedright
1) Easy to enter this way\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.52\columnwidth}\raggedright
2) Unclear that each month is a level of the predictor variable \emph{occasion}\strut
\end{minipage} & \begin{minipage}[t]{0.42\columnwidth}\raggedright
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.52\columnwidth}\raggedright
3) Dependent variable (\emph{elapsedtime}) is not indicated\strut
\end{minipage} & \begin{minipage}[t]{0.42\columnwidth}\raggedright
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.52\columnwidth}\raggedright
4) Unclear that \emph{elapsedtime} is the measurement in each month column\strut
\end{minipage} & \begin{minipage}[t]{0.42\columnwidth}\raggedright
\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}

Fortunately, the problems with Table \ref{tab:withinex} can be largely resolved by converting the data to a tidy data format. This can be done with the pivot\_longer() command that we will learn about later in the chapter. Thus, we can enter the data in the easy-to-enter format of Table \ref{tab:withinex} but then later convert it to a tidy data format. After this conversion the data will appear as in Table \ref{tab:withintidyex}. For the elapsedtime variable this data is now in the tidy data format. Each row corresponds to a single observed elapsedtime. Each column corresponds to a single variable. Somewhat problematically, however, sex is repeated three times for each person (i.e., over the three rows) -- and this can be confusing. However, if the focus is on analyzing elapsed time this tidy data format makes sense.

Why did we walk you through this technical treatment of structuring data within the computer at this point in time? So that you pay attention to the advice that follows. You can see at this point that you may well need to restructure your data for certain analyses. The ability to do so quickly and easily depends upon you following the advice in the rest of this chapter around the naming conventions for variables and other aspects of your analyses. You can imagine the challenges of converting the data in Table \ref{tab:withinex} to the data in Table \ref{tab:withintidyex} by hand. You want to be able to automate that process and others -- which is made substantially easier if you follow the forthcoming advice about naming conventions in the tidyverse.
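As a rough preview of that kind of automation, the sketch below shows how the wide data in Table \ref{tab:withinex} could be converted to the tidy format of Table \ref{tab:withintidyex} with pivot\_longer(). It assumes the wide data has already been loaded into a tibble called run\_times -- a name used here purely for illustration -- and that the tidyverse is loaded; the actual workflow for loading data is covered later in this chapter.

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# Sketch: convert one-row-per-participant (wide) running data to a tidy format.}
\CommentTok{# Assumes a tibble called run_times with columns: id, sex, march, may, july.}
\NormalTok{run_times_tidy <-}\StringTok{ }\NormalTok{run_times }\OperatorTok{%>%}
\StringTok{ }\KeywordTok{pivot_longer}\NormalTok{(}\DataTypeTok{cols =} \KeywordTok{c}\NormalTok{(march, may, july),}
\DataTypeTok{names_to =} \StringTok{"occasion"}\NormalTok{,}
\DataTypeTok{values_to =} \StringTok{"elapsedtime"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

Because the three month columns can simply be listed by name, the whole conversion is a single command -- exactly the kind of payoff that sensible, consistent column names provide.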
\hypertarget{data-collection-and-entry}{%
\section{Data collection and entry}\label{data-collection-and-entry}}

\hypertarget{question-with-numerical-answers}{%
\subsection{Question with numerical answers}\label{question-with-numerical-answers}}

\hypertarget{likert-type-items}{%
\subsection{Likert-type items}\label{likert-type-items}}

relate back to select function in previous chapter

\hypertarget{multi-item-scales}{%
\subsection{Multi-item scales}\label{multi-item-scales}}

\hypertarget{questions-with-categorical-answers}{%
\subsection{Questions with categorical answers}\label{questions-with-categorical-answers}}

Avoid categorical if not really categorical

Avoid numerical representation of categorical variables

\hypertarget{levels-of-an-independent-varible}{%
\subsection{Levels of an independent variable}\label{levels-of-an-independent-varible}}

\hypertarget{levels-of-a-single-within-participant-variable}{%
\subsection{Levels of a single within participant variable}\label{levels-of-a-single-within-participant-variable}}

\hypertarget{levels-of-multiple-within-participant-variables}{%
\subsection{Levels of multiple within participant variables}\label{levels-of-multiple-within-participant-variables}}

relate back to select function in previous chapter

Do this one first when generating above advice

Surveys - use meaningful names - use meaningful names for subsets -sample size analysis -unfeasible article by lakens

\begin{itemize}
\tightlist
\item
  data are collected in different ways
\item
  data collected by programs in the lab that require
\item
  lab measurements paper and entering later
\item
  paper surveys and entering later
\item
  website surveys
\item
  a mix of all of the above
\end{itemize}

each trial is a row - multiple rows per person\ldots{}

Distinguish between entering data and not. But also think about what's not confusing\ldots{}

talk about wide not showing DV or IV just levels of IV

if you're writing a cognitive program or making a survey think about the variable names think about how the data is outputted.

If you have entirely within person data then tidy is the obvious choice. If you have entirely between then wide makes sense. If you have a mix - it's more difficult. Different research area. Think about the problems. For example, a good data code book is essential if you have wide with repeated measures variables because there is no way to tell what the IV or DV is by inspecting the data.

TIDYVERSE NAMING CONVENTIONS ARE IMPORTANT BECAUSE IN SOME CASES THEY ARE REQUIRED for the SCRIPTS to work. If you haven't followed the naming conventions you may be making your life quite difficult.

Think about - missing data - column names - representation of categorical variables in the data set

\begin{itemize}
\tightlist
\item
  computer/web collection
\end{itemize}

\hypertarget{raw-data}{%
\section{Raw data}\label{raw-data}}

We begin by examining the data as originally entered into a spreadsheet. In Figure \ref{fig:rawdataitems} you see a screen shot of the initial raw data as a researcher might receive it. Take careful note of the numerous -999 values used to indicate missing values. As part of creating the analytic data that we will analyze, we need to indicate to the computer that the -999 values are not data but codes that represent missing values.
\begin{figure}
\includegraphics[width=0.85\linewidth]{ch_score_items/images/screenshot_raw_data} \caption{Raw data for item scoring}\label{fig:rawdataitems}
\end{figure}

\hypertarget{loading-raw-data}{%
\section{Loading raw data}\label{loading-raw-data}}

I suggest you begin every R Studio task in the following way:

R Studio in the Cloud

1. Create a new Project using the web interface
2. Upload your data files using the upload button in the Files pane

R Studio on Your Computer

1. Create a folder on your computer for the analysis
2. Place your data files in that folder
3. Use the menu item File \textgreater{} New Project\ldots{} to start the project
4. On the window that appears select ``Existing Directory''
5. On the next screen, press the ``Browse'' button and find/select the folder with your data
6. Press the Create Project Button

Regardless of whether you are working from the cloud or locally you should now have an R Studio project with your data files in it. Using Projects.

\textbf{Class note: You don't need to do either of these approaches. You will just ``Start'' each assignment in the class workspace on R Studio Cloud.}

Create an R Studio project for this activity. Create a new script in your project and save it.

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{library}\NormalTok{(tidyverse)}
\KeywordTok{library}\NormalTok{(janitor)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## 
## Attaching package: 'janitor'
\end{verbatim}

\begin{verbatim}
## The following objects are masked from 'package:stats':
## 
##     chisq.test, fisher.test
\end{verbatim}

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{my_missing_value_codes <-}\StringTok{ }\KeywordTok{c}\NormalTok{(}\StringTok{"-999"}\NormalTok{, }\StringTok{""}\NormalTok{, }\StringTok{"NA"}\NormalTok{)}
\NormalTok{raw_data <-}\StringTok{ }\KeywordTok{read_csv}\NormalTok{(}\DataTypeTok{file =} \StringTok{"data_item_scoring.csv"}\NormalTok{,}
\DataTypeTok{na =}\NormalTok{ my_missing_value_codes)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## Parsed with column specification:
## cols(
##   id = col_double(),
##   age = col_double(),
##   sex = col_character(),
##   SE1 = col_double(),
##   SE2 = col_double(),
##   SE3 = col_double(),
##   SE4 = col_double(),
##   SE5 = col_double(),
##   SE6 = col_double(),
##   SE7 = col_double(),
##   SE8 = col_double(),
##   SE9 = col_double(),
##   SE10 = col_double()
## )
\end{verbatim}

\hypertarget{initial-inspection}{%
\section{Initial inspection}\label{initial-inspection}}

We use glimpse to do an initial inspection of the column names in this data set. All of the column names conform to the \href{https://style.tidyverse.org}{tidyverse style guidelines}, so we do not need to run the clean\_names() function from the janitor package.

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{glimpse}\NormalTok{(raw_data)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## Rows: 300
## Columns: 13
## $ id <dbl> 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,...
## $ age <dbl> 23, 22, 18, 23, 22, 17, 23, 22, 17, 21, 20...
## $ sex <chr> "male", "female", "male", "female", "male"...
## $ SE1 <dbl> 3, 4, 4, 3, 3, 3, 3, 4, 4, 4, 3, 4, NA, NA...
## $ SE2 <dbl> 2, 3, 3, 2, 2, 3, 2, 3, 3, 3, 2, 2, NA, 3,...
## $ SE3 <dbl> 4, 4, 4, 3, 4, 4, NA, 4, 4, 3, 4, 4, 4, NA...
## $ SE4 <dbl> 3, 4, 4, 3, 4, 4, 4, 4, 3, 4, NA, 4, 3, 3,...
## $ SE5 <dbl> 4, 4, 4, 4, 4, NA, NA, 4, 4, 4, 3, 4, 4, 3...
## $ SE6 <dbl> 3, 5, 4, 3, 3, 3, 3, 5, 3, 3, 3, 4, 4, 3, ...
## $ SE7 <dbl> 1, 1, 1, NA, 1, 1, 2, 1, 2, 2, 3, 1, 3, 2,...
## $ SE8 <dbl> 3, NA, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2,...
## $ SE9 <dbl> NA, 5, 5, 4, 4, 4, 4, 5, NA, 4, NA, 5, 4, ...
## $ SE10 <dbl> 5, NA, 5, 4, 5, 4, 4, 5, 5, 5, 4, NA, 4, 5...
\end{verbatim}

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{view}\NormalTok{(raw_data)}
\end{Highlighting}
\end{Shaded}

See Figure \ref{fig:narawdataitems}.

\begin{figure}
\includegraphics[width=0.85\linewidth]{ch_score_items/images/screenshot_raw_data_na} \caption{Missing values now NA}\label{fig:narawdataitems}
\end{figure}

\hypertarget{handling-categorical-variables}{%
\section{Handling categorical variables}\label{handling-categorical-variables}}

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# Turn all columns that are of type character into factors}
\NormalTok{raw_data <-}\StringTok{ }\NormalTok{raw_data }\OperatorTok{%>%}
\StringTok{ }\KeywordTok{mutate}\NormalTok{(}\KeywordTok{across}\NormalTok{(}\DataTypeTok{.cols =} \KeywordTok{where}\NormalTok{(is.character),}
\DataTypeTok{.fns =}\NormalTok{ as.factor))}
\end{Highlighting}
\end{Shaded}

We can see there was only one column that changed, but if there had been many columns that were characters they all would have changed.

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{glimpse}\NormalTok{(raw_data)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## Rows: 300
## Columns: 13
## $ id <dbl> 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,...
## $ age <dbl> 23, 22, 18, 23, 22, 17, 23, 22, 17, 21, 20...
## $ sex <fct> male, female, male, female, male, female, ...
## $ SE1 <dbl> 3, 4, 4, 3, 3, 3, 3, 4, 4, 4, 3, 4, NA, NA...
## $ SE2 <dbl> 2, 3, 3, 2, 2, 3, 2, 3, 3, 3, 2, 2, NA, 3,...
## $ SE3 <dbl> 4, 4, 4, 3, 4, 4, NA, 4, 4, 3, 4, 4, 4, NA...
## $ SE4 <dbl> 3, 4, 4, 3, 4, 4, 4, 4, 3, 4, NA, 4, 3, 3,...
## $ SE5 <dbl> 4, 4, 4, 4, 4, NA, NA, 4, 4, 4, 3, 4, 4, 3...
## $ SE6 <dbl> 3, 5, 4, 3, 3, 3, 3, 5, 3, 3, 3, 4, 4, 3, ...
## $ SE7 <dbl> 1, 1, 1, NA, 1, 1, 2, 1, 2, 2, 3, 1, 3, 2,...
## $ SE8 <dbl> 3, NA, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2,...
## $ SE9 <dbl> NA, 5, 5, 4, 4, 4, 4, 5, NA, 4, NA, 5, 4, ...
## $ SE10 <dbl> 5, NA, 5, 4, 5, 4, 4, 5, 5, 5, 4, NA, 4, 5...
\end{verbatim}

It's often helpful to have id as a factor rather than a number, so we add an extra command that changes this:

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{raw_data <-raw_data }\OperatorTok{%>%}
\StringTok{ }\KeywordTok{mutate}\NormalTok{(}\DataTypeTok{id =} \KeywordTok{as.factor}\NormalTok{(id))}
\end{Highlighting}
\end{Shaded}

Now it looks like our data is ready for the creation of scale scores:

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{glimpse}\NormalTok{(raw_data)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## Rows: 300
## Columns: 13
## $ id <fct> 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,...
## $ age <dbl> 23, 22, 18, 23, 22, 17, 23, 22, 17, 21, 20...
## $ sex <fct> male, female, male, female, male, female, ...
## $ SE1 <dbl> 3, 4, 4, 3, 3, 3, 3, 4, 4, 4, 3, 4, NA, NA...
## $ SE2 <dbl> 2, 3, 3, 2, 2, 3, 2, 3, 3, 3, 2, 2, NA, 3,...
## $ SE3 <dbl> 4, 4, 4, 3, 4, 4, NA, 4, 4, 3, 4, 4, 4, NA...
## $ SE4 <dbl> 3, 4, 4, 3, 4, 4, 4, 4, 3, 4, NA, 4, 3, 3,...
## $ SE5 <dbl> 4, 4, 4, 4, 4, NA, NA, 4, 4, 4, 3, 4, 4, 3...
## $ SE6 <dbl> 3, 5, 4, 3, 3, 3, 3, 5, 3, 3, 3, 4, 4, 3, ...
## $ SE7 <dbl> 1, 1, 1, NA, 1, 1, 2, 1, 2, 2, 3, 1, 3, 2,...
## $ SE8 <dbl> 3, NA, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2,...
## $ SE9 <dbl> NA, 5, 5, 4, 4, 4, 4, 5, NA, 4, NA, 5, 4, ...
## $ SE10 <dbl> 5, NA, 5, 4, 5, 4, 4, 5, 5, 5, 4, NA, 4, 5...
\end{verbatim}

\hypertarget{seeing-your-data}{%
\section{Seeing your data}\label{seeing-your-data}}

See the first six rows of the data with the \emph{head} command below. If you wanted to see all of the data you would use View(raw\_data). The NA values in the output indicate missing values (NA = Not Available).

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{head}\NormalTok{(raw_data)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## # A tibble: 6 x 13
## id age sex SE1 SE2 SE3 SE4 SE5 SE6
## <fct> <dbl> <fct> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 1 23 male 3 2 4 3 4 3
## 2 2 22 female 4 3 4 4 4 5
## 3 3 18 male 4 3 4 4 4 4
## 4 4 23 female 3 2 3 3 4 3
## 5 5 22 male 3 2 4 4 4 3
## 6 6 17 female 3 3 4 4 NA 3
## # ... with 4 more variables: SE7 <dbl>, SE8 <dbl>,
## # SE9 <dbl>, SE10 <dbl>
\end{verbatim}

\hypertarget{dealing-with-reverse-key-items}{%
\section{Dealing with reverse key items}\label{dealing-with-reverse-key-items}}

Our first step is dealing with reverse key items. The way you deal with these items depends on how you scored them. Imagine you had a 5-point scale. You could have scored the scale with the values 1, 2, 3, 4, and 5. Alternatively, you could have scored the scale with the values 0, 1, 2, 3, and 4. In this example, we scored the data using the 1 to 5 system. So we'll use that. Later I'll show you how to deal with the other scoring system (0 to 4).

\hypertarget{scoring-items-where-the-ratings-scale-starts-with-1}{%
\subsection{Scoring items where the ratings scale starts with 1}\label{scoring-items-where-the-ratings-scale-starts-with-1}}

We need to take items that were reverse keyed when the participants completed them and recode those responses. We do that using the \emph{mutate} command from the \emph{dplyr} package. In this data file the only reverse-key item was SE7 (we know this from when we created the survey). We use the command below to reverse key an item with response options ranging from 1 to 5. So we use 6 in the command (i.e., one higher than 5).

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{raw_data <-raw_data }\OperatorTok{%>%}
\StringTok{ }\KeywordTok{mutate}\NormalTok{(}\DataTypeTok{SE7c =} \DecValTok{6} \OperatorTok{-}\StringTok{ }\NormalTok{SE7)}
\end{Highlighting}
\end{Shaded}

The command above creates a new column in raw\_data called SE7c that has the reverse-keyed values for SE7 in it. You can see the new SE7c column using the command below. Because glimpse() lists one column per row of output, the new SE7c column appears at the bottom of the output -- it is the last column in the data.

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{glimpse}\NormalTok{(raw_data)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## Rows: 300
## Columns: 14
## $ id <fct> 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,...
## $ age <dbl> 23, 22, 18, 23, 22, 17, 23, 22, 17, 21, 20...
## $ sex <fct> male, female, male, female, male, female, ...
## $ SE1 <dbl> 3, 4, 4, 3, 3, 3, 3, 4, 4, 4, 3, 4, NA, NA...
## $ SE2 <dbl> 2, 3, 3, 2, 2, 3, 2, 3, 3, 3, 2, 2, NA, 3,...
## $ SE3 <dbl> 4, 4, 4, 3, 4, 4, NA, 4, 4, 3, 4, 4, 4, NA...
## $ SE4 <dbl> 3, 4, 4, 3, 4, 4, 4, 4, 3, 4, NA, 4, 3, 3,...
## $ SE5 <dbl> 4, 4, 4, 4, 4, NA, NA, 4, 4, 4, 3, 4, 4, 3...
## $ SE6 <dbl> 3, 5, 4, 3, 3, 3, 3, 5, 3, 3, 3, 4, 4, 3, ...
## $ SE7 <dbl> 1, 1, 1, NA, 1, 1, 2, 1, 2, 2, 3, 1, 3, 2,...
## $ SE8 <dbl> 3, NA, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2,...
## $ SE9 <dbl> NA, 5, 5, 4, 4, 4, 4, 5, NA, 4, NA, 5, 4, ...
## $ SE10 <dbl> 5, NA, 5, 4, 5, 4, 4, 5, 5, 5, 4, NA, 4, 5...
## $ SE7c <dbl> 5, 5, 5, NA, 5, 5, 4, 5, 4, 4, 3, 5, 3, 4,...
\end{verbatim}

We have reverse keyed one item. So now when we create our scale we will use item SE7c (the c stands for correctly coded) instead of the original item SE7. That is, we will use items SE1, SE2, SE3, SE4, SE5, SE6, \textbf{SE7c}, SE8, SE9, and SE10 to form the scale.

\textbf{Note for a box} multiple items what it looks like

\textbf{Note FOR A BOX}. If you had used response options numbered 0 to 4 for each item you would use the command below instead. Note that we use 4 in the command this time instead of a value one higher.

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{raw_data <-}\StringTok{ }\KeywordTok{mutate}\NormalTok{(raw_data, }\DataTypeTok{SE7c =} \DecValTok{4} \OperatorTok{-}\StringTok{ }\NormalTok{SE7) }
\end{Highlighting}
\end{Shaded}

\hypertarget{creating-the-scale-score}{%
\section{Creating the scale score}\label{creating-the-scale-score}}

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{raw_data <-}\StringTok{ }\NormalTok{raw_data }\OperatorTok{%>%}\StringTok{ }
\StringTok{ }\KeywordTok{rowwise}\NormalTok{() }\OperatorTok{%>%}\StringTok{ }
\StringTok{ }\KeywordTok{mutate}\NormalTok{(}\DataTypeTok{self_esteem =} \KeywordTok{mean}\NormalTok{(}\KeywordTok{c}\NormalTok{(SE1, SE2, SE3, SE4, SE5, SE6, SE7c, SE8, SE9, SE10),}
\DataTypeTok{na.rm =} \OtherTok{TRUE}\NormalTok{)) }\OperatorTok{%>%}
\StringTok{ }\KeywordTok{ungroup}\NormalTok{()}
\end{Highlighting}
\end{Shaded}

When you see ungroup() in this context you can think of it as ``turn off rowwise''. We can see our data now has the self\_esteem column:

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{glimpse}\NormalTok{(raw_data)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## Rows: 300
## Columns: 15
## $ id <fct> 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, ...
## $ age <dbl> 23, 22, 18, 23, 22, 17, 23, 22, 17,...
## $ sex <fct> male, female, male, female, male, f...
## $ SE1 <dbl> 3, 4, 4, 3, 3, 3, 3, 4, 4, 4, 3, 4,...
## $ SE2 <dbl> 2, 3, 3, 2, 2, 3, 2, 3, 3, 3, 2, 2,...
## $ SE3 <dbl> 4, 4, 4, 3, 4, 4, NA, 4, 4, 3, 4, 4...
## $ SE4 <dbl> 3, 4, 4, 3, 4, 4, 4, 4, 3, 4, NA, 4...
## $ SE5 <dbl> 4, 4, 4, 4, 4, NA, NA, 4, 4, 4, 3, ...
## $ SE6 <dbl> 3, 5, 4, 3, 3, 3, 3, 5, 3, 3, 3, 4,...
## $ SE7 <dbl> 1, 1, 1, NA, 1, 1, 2, 1, 2, 2, 3, 1...
## $ SE8 <dbl> 3, NA, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3...
## $ SE9 <dbl> NA, 5, 5, 4, 4, 4, 4, 5, NA, 4, NA,...
## $ SE10 <dbl> 5, NA, 5, 4, 5, 4, 4, 5, 5, 5, 4, N...
## $ SE7c <dbl> 5, 5, 5, NA, 5, 5, 4, 5, 4, 4, 3, 5...
## $ self_esteem <dbl> 3.556, 4.250, 4.100, 3.222, 3.700, ...
\end{verbatim}

(Alternative code: removing ITEM 7 and using begins with: show full example)

Think about this when creating names for your columns

\hypertarget{creatinig-analytic-data-from-raw-data}{%
\section{Creating analytic data from raw data}\label{creatinig-analytic-data-from-raw-data}}

Select only the columns you will use in your analysis:

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{analytic_data <-}\StringTok{ }\NormalTok{raw_data }\OperatorTok{%>%}
\StringTok{ }\KeywordTok{select}\NormalTok{(id, age, sex, self_esteem)}
\end{Highlighting}
\end{Shaded}

We can see our new data set has only these columns of interest:

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{glimpse}\NormalTok{(analytic_data)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## Rows: 300
## Columns: 4
## $ id <fct> 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, ...
## $ age <dbl> 23, 22, 18, 23, 22, 17, 23, 22, 17,...
## $ sex <fct> male, female, male, female, male, f...
## $ self_esteem <dbl> 3.556, 4.250, 4.100, 3.222, 3.700, ...
\end{verbatim}

\hypertarget{wait-i-need-alpha}{%
\section{Wait: I need alpha!}\label{wait-i-need-alpha}}

We return to the raw\_data file that has the original item data to obtain Cronbach's alpha, which is labeled ``raw\_alpha'' in the output.

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{self_esteem_item_analysis <-}\StringTok{ }\NormalTok{raw_data }\OperatorTok{%>%}
\StringTok{ }\KeywordTok{select}\NormalTok{(SE1, SE2, SE3, SE4, SE5, SE6, SE7c, SE8, SE9, SE10) }\OperatorTok{%>%}
\StringTok{ }\NormalTok{psych}\OperatorTok{::}\KeywordTok{alpha}\NormalTok{()}
\KeywordTok{print}\NormalTok{(self_esteem_item_analysis}\OperatorTok{$}\NormalTok{total)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## raw_alpha std.alpha G6(smc) average_r S/N ase mean
## 0.8278 0.8333 0.8276 0.3333 4.999 0.0143 3.656
## sd median_r
## 0.3392 0.3277
\end{verbatim}

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# To see the full item analysis use:}
\CommentTok{# print(self_esteem_item_analysis)}
\end{Highlighting}
\end{Shaded}

\hypertarget{wait-i-need-item-correlations-and-descriptive-statistics}{%
\section{Wait: I need item correlations and descriptive statistics}\label{wait-i-need-item-correlations-and-descriptive-statistics}}

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{SE_items <-}\StringTok{ }\NormalTok{raw_data }\OperatorTok{%>%}\StringTok{ }
\StringTok{ }\KeywordTok{select}\NormalTok{(}\KeywordTok{starts_with}\NormalTok{(}\StringTok{"SE"}\NormalTok{, }\DataTypeTok{ignore.case =} \OtherTok{FALSE}\NormalTok{))}
\NormalTok{psych}\OperatorTok{::}\KeywordTok{describe}\NormalTok{(SE_items)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## vars n mean sd median
## SE1 1 276 3.39 0.54 3
## SE2 2 272 2.35 0.48 2
## SE3 3 269 3.96 0.37 4
## SE4 4 285 3.54 0.50 4
## SE5 5 265 3.78 0.47 4
## SE6 6 275 3.34 0.51 3
## SE7 7 273 1.51 0.61 1
## SE8 8 272 2.84 0.37 3
## SE9 9 265 4.29 0.70 4
## SE10 10 276 4.57 0.61 5
## SE7c 11 273 4.49 0.61 5
\end{verbatim}

When you run the cor command you have to indicate how it will handle missing data. The options are below. You can learn more about what each one of these options means by typing \textbf{?cor} into the Console; this will bring up the help page for the cor command.
\begin{longtable}[]{@{}l@{}} \toprule Missing data options for cor\tabularnewline \midrule \endhead everything\tabularnewline all.obs\tabularnewline complete.obs\tabularnewline na.or.complete\tabularnewline pairwise.complete.obs\tabularnewline \bottomrule \end{longtable} \begin{Shaded} \begin{Highlighting}[] \NormalTok{SE_items }\OperatorTok{%>%} \StringTok{ }\KeywordTok{cor}\NormalTok{(}\DataTypeTok{use =} \StringTok{"pairwise.complete.obs"}\NormalTok{) }\OperatorTok{%>%} \StringTok{ }\KeywordTok{round}\NormalTok{(}\DecValTok{2}\NormalTok{)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## SE1 SE2 SE3 SE4 SE5 SE6 SE7 SE8 SE9 ## SE1 1.00 0.31 0.20 0.34 0.34 0.33 -0.32 0.28 0.38 ## SE2 0.31 1.00 0.24 0.22 0.31 0.32 -0.29 0.29 0.36 ## SE3 0.20 0.24 1.00 0.28 0.30 0.23 -0.43 0.38 0.35 ## SE4 0.34 0.22 0.28 1.00 0.29 0.34 -0.37 0.32 0.32 ## SE5 0.34 0.31 0.30 0.29 1.00 0.39 -0.41 0.41 0.43 ## SE6 0.33 0.32 0.23 0.34 0.39 1.00 -0.32 0.24 0.34 ## SE7 -0.32 -0.29 -0.43 -0.37 -0.41 -0.32 1.00 -0.44 -0.42 ## SE8 0.28 0.29 0.38 0.32 0.41 0.24 -0.44 1.00 0.51 ## SE9 0.38 0.36 0.35 0.32 0.43 0.34 -0.42 0.51 1.00 ## SE10 0.31 0.34 0.28 0.19 0.40 0.25 -0.39 0.35 0.36 ## SE7c 0.32 0.29 0.43 0.37 0.41 0.32 -1.00 0.44 0.42 ## SE10 SE7c ## SE1 0.31 0.32 ## SE2 0.34 0.29 ## SE3 0.28 0.43 ## SE4 0.19 0.37 ## SE5 0.40 0.41 ## SE6 0.25 0.32 ## SE7 -0.39 -1.00 ## SE8 0.35 0.44 ## SE9 0.36 0.42 ## SE10 1.00 0.39 ## SE7c 0.39 1.00 \end{verbatim} \hypertarget{using-r-the-old-way-or-the-new-way-the-tidyverse-way}{% \section{Using R the old way or the new way (the tidyverse way)}\label{using-r-the-old-way-or-the-new-way-the-tidyverse-way}} Previously we noted that there is an older way of using R (base R) and the new way of using R (the tidyverse) that we will use. Sometimes students have problems with their code when they mix and match these approaches using a bit of both. We will be using the tidyverse approach to using R but on the internet you will often see sample code that uses the older base R approach. A bit of background knowledge is helpful for understanding why we do things one way (e.g., read\_csv with the tidyverse) instead of another (e.g., read.csv with base R). \hypertarget{tibbles-vs-data-frames-why-use-read_csv-instead-of-read.csv}{% \subsubsection{Tibbles vs Data Frames: Why use read\_csv instead of read.csv}\label{tibbles-vs-data-frames-why-use-read_csv-instead-of-read.csv}} When you load data into R it is typically represented in one of two formats inside the computer - depending on the command you used. The original format for representing a data set in R is the data frame. You will see this term used frequently when you read about R. When you load data using read.csv your data is loaded into a data frame in the computer. That is your data is represented in the memory of the computer in particular format and structure called a data frame. \hypertarget{read.csv-puts-data-into-a-data-frame}{% \subsubsection{read.csv puts data into a data frame}\label{read.csv-puts-data-into-a-data-frame}} \begin{Shaded} \begin{Highlighting}[] \NormalTok{my_dataframe <-}\StringTok{ }\KeywordTok{read.csv}\NormalTok{(}\DataTypeTok{file =} \StringTok{"data_okcupid.csv"}\NormalTok{)} \end{Highlighting} \end{Shaded} Notice that when you print a data frame it does NOT show you the number of rows or columns above the data like our example did with the okcupid\_profiles data. It also list ALL of your data rather than just the first few rows. 
As a result in the output below I show only the first 10 rows of the output - because all the rows are printed in your Console (too much to show here). \begin{Shaded} \begin{Highlighting}[] \KeywordTok{print}\NormalTok{(my_dataframe)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## age diet height pets ## 1 22 strictly anything 75 likes dogs and likes cats ## 2 35 mostly other 70 likes dogs and likes cats ## 3 38 anything 68 has cats ## 4 23 vegetarian 71 likes cats ## 5 29 <NA> 66 likes dogs and likes cats ## 6 29 mostly anything 67 likes cats ## 7 32 strictly anything 65 likes dogs and likes cats ## 8 31 mostly anything 65 likes dogs and likes cats ## 9 24 strictly anything 67 likes dogs and likes cats ## 10 37 mostly anything 65 likes dogs and likes cats ## sex status ## 1 m single ## 2 m single ## 3 m available ## 4 m single ## 5 m single ## 6 m single ## 7 f single ## 8 f single ## 9 f single ## 10 m single \end{verbatim} \hypertarget{read_csv-puts-data-into-a-tibble}{% \subsubsection{read\_csv puts data into a tibble}\label{read_csv-puts-data-into-a-tibble}} When you use the read\_csv command the data you load is stored in the computer as a tibble. The tibble is modern version of the data frame. Notice that when you print a tibble it DOES show you the number of rows and columns. As well, the tibble only provides the first few rows of output so it doesn't fill your screen. \begin{Shaded} \begin{Highlighting}[] \NormalTok{my_tibble <-}\StringTok{ }\KeywordTok{read_csv}\NormalTok{(}\DataTypeTok{file =} \StringTok{"data_okcupid.csv"}\NormalTok{)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Parsed with column specification: ## cols( ## age = col_double(), ## diet = col_character(), ## height = col_double(), ## pets = col_character(), ## sex = col_character(), ## status = col_character() ## ) \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{print}\NormalTok{(my_tibble)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## # A tibble: 59,946 x 6 ## age diet height pets sex status ## <dbl> <chr> <dbl> <chr> <chr> <chr> ## 1 22 strictly an~ 75 likes dogs and l~ m single ## 2 35 mostly other 70 likes dogs and l~ m single ## 3 38 anything 68 has cats m availa~ ## 4 23 vegetarian 71 likes cats m single ## 5 29 <NA> 66 likes dogs and l~ m single ## 6 29 mostly anyt~ 67 likes cats m single ## 7 32 strictly an~ 65 likes dogs and l~ f single ## 8 31 mostly anyt~ 65 likes dogs and l~ f single ## 9 24 strictly an~ 67 likes dogs and l~ f single ## 10 37 mostly anyt~ 65 likes dogs and l~ m single ## # ... with 59,936 more rows \end{verbatim} \hypertarget{deeper-differences-between-data-frames-and-tibbles}{% \subsubsection{Deeper differences between data frames and tibbles}\label{deeper-differences-between-data-frames-and-tibbles}} In short you should always use tibbles (i.e., use read\_csv) - they are simply enhanced data frames (i.e., the new version of the data frame). The differences between data frames and tibbles run deeper than the superficial output provided here. On some rare occasions an old package or command may not work with a tibble so you need to make it a data frame. You can do so with the commands below: \hypertarget{converting-a-tibble-into-a-data-frame}{% \subsubsection{Converting a tibble into a data frame}\label{converting-a-tibble-into-a-data-frame}} This command creates a new data set called new\_data\_frame (use any name you want) from the tibble data. 
\begin{Shaded} \begin{Highlighting}[] \NormalTok{new_dataframe <-}\StringTok{ }\KeywordTok{as.data.frame}\NormalTok{(my_tibble)} \end{Highlighting} \end{Shaded} \cleardoublepage \hypertarget{appendix-appendix}{% \appendix \addcontentsline{toc}{chapter}{\appendixname}} \hypertarget{more-to-say}{% \chapter{More to Say}\label{more-to-say}} Yeah! I have finished my book, but I have more to say about some topics. Let me explain them in this appendix. To know more about \textbf{bookdown}, see \url{https://bookdown.org}. \bibliography{book.bib,packages.bib} \backmatter \printindex \end{document}
\section{Status}
\paragraph*{Status of GSL-Library}
The gsl-library has been stable since version 1.0 and is suitable for general use. More information about it is available at \url{http://www.gnu.org/software/gsl/}.

\paragraph*{Status of this interface}
Nearly all modules are wrapped. A lot of tests cover various functionality. Please report to the mailing list \url{[email protected]} if you find a bug.

The hankel modules have been wrapped. Please write to the mailing list \url{[email protected]} if you require one of the modules and are willing to help with a simple example. If any other function or module (e.g.\ ntuple) is missing, do not hesitate to write to the list.

\paragraph*{Retrieving the Interface}
You can download it here: \url{http://sourceforge.net/projects/pygsl}

\section{Requirements}
To build the interface, you will need
\begin{itemize}
\item \ulink{gsl-1.x}{http://sources.redhat.com/gsl},
\item \ulink{python2.2}{http://www.python.org} or better,
\item \ulink{NumPy}{http://numpy.sf.net}, and
\item a C compiler (like \ulink{gcc}{http://gcc.gnu.org}).
\end{itemize}
Supported platforms are:
\begin{itemize}
\item Linux (Redhat/Debian/SuSE) with python2.* and gsl-1.*
\item Win32
\end{itemize}
It has been, and still is, tested on an irregular basis on the following platforms
\begin{itemize}
\item SUN
\item Cygwin
\item MacOS X
\end{itemize}
but it is supposed to build on any POSIX platform.

\section{Installing the pygsl interface}
\program{gsl-config} must be on your path:\nopagebreak
\begin{verbatim}
# unpack the source distribution
gzip -d -c pygsl-x.y.z.tar.gz|tar xvf-
cd pygsl-x.y.z
# do this with your preferred python version
# to set the gsl location explicitly use setup.py --gsl-prefix=/path/to/gsl
python setup.py build
# change to a user id that is allowed to do the installation
python setup.py install
\end{verbatim}
Ready....

{\bf Do not test the interface in the distribution root or in the directories \file{src} or \file{pygsl}.}

\subsection{Building on win32}
Windows does not provide a POSIX shell by default, so a different approach is required here. First change into the directory \file{gsl_dist}. Copy the file \file{gsl_site_example.py} and edit it to reflect your installation of GSL and SWIG if you want to build the interface yourself. The pygsl windows binaries distributed over \url{http://sourceforge.net/projects/pygsl/} are built using the mingw32 compiler.

\paragraph*{Uninstall GSL interface}
\code{rm -r }"python install path"\code{/lib/python}"version"\code{/site-packages/pygsl}

\paragraph*{Testing}
The directory \file{tests} contains several test suites, based on Python's \module{unittest}. The script \file{run_test.py} in this directory will run them one after the other.

\paragraph*{Support}
Please send mail to our mailing list at \email{[email protected]}.

\paragraph*{Development}
You can browse our CVS tree at \url{http://cvs.sourceforge.net/cgi-bin/viewcvs.cgi/pygsl/pygsl/}. \\
Type this to check out the current version:
\begin{verbatim}
cvs -d:pserver:[email protected]:/cvsroot/pygsl login
#Hit return for no password.
cvs -z3 -d:pserver:[email protected]:/cvsroot/pygsl co pygsl
\end{verbatim}
The script \program{tools/extract_tool.py} generates most of the special function code.

%\input{install_advanced.tex}

\paragraph*{ToDo}
Implement other parts:

\paragraph*{History}
\begin{itemize}
\item A gsl-interface for Python was needed for a project at \ulink{Center for Applied Informatics Cologne}{http://www.zaik.uni-koeln.de/AFS}.
\item \file{gsl-0.0.3} was released on May 23, 2001
\item \file{gsl-0.0.4} was released on January 8, 2002
\item \file{gsl-0.0.5} has been growing since January 2002
\item \file{gsl-0.2.0} was released at
\item \file{gsl-0.3.0} was released at
\item \file{gsl-0.3.1} was released at
\item \file{gsl-0.3.2} was released at
\item \file{gsl-0.9.4} was released on 25 October 2008
\end{itemize}

\paragraph*{Thanks}
Jochen K\"upper (\email{[email protected]}) for the \module{pygsl.statistics} part\\
Fabian Jakobs for \module{pygsl.blas}, \module{pygsl.eigen}, \module{pygsl.linalg}, \module{pygsl.permutation}\\
Leonardo Milano for the rpm build\\
Eric Gurrola and Peter Stoltz for testing and supporting the port of pygsl to the Mac\\
Sebastien Maret for supporting the Fink \url{http://fink.sourceforge.net} port of pygsl.

\paragraph*{Maintainers}
Achim G\"adke (\email{[email protected]}),\\
Pierre Schnizer (\email{[email protected]})
% !TeX root = ../index.tex
\chapter{Web Services Technology}

A \textit{Web Service} is a service accessible on the Internet via the World Wide Web that people can interact with using electronic devices. The underlying technology of Web Services is typically either \textit{AJAX} (Asynchronous JavaScript And XML) or \textit{REST} (Representational State Transfer).

While AJAX was originally intended to use the XML markup language for transferring data, it is also commonly used to transfer data using the JSON file format. AJAX allowed Web Services to send and receive new data in the background without interrupting the end-user's experience, such as no longer requiring a page refresh when submitting data to the server.

RESTful Web Services use a uniform and predefined set of operations (GET, PUT, PATCH, POST, DELETE and OPTIONS) that are completely agnostic to previous requests sent/received.

There are many security concerns related to Web Services. One is the risk of injection attacks (such as SQL or XML injection), as many Web Services use user input when requesting data from their database(s). If user input is not validated or sanitised (e.g.\ by using prepared statements) then an attacker is able to craft requests that can expose the data stored in the database -- or potentially delete it.

Another security concern is session hijacking in Web Services that have a user authentication system. When a user logs in, a session ID is created that is unique to that user. This is then stored on the client's device, typically as a Cookie or a LocalStorage key. The session ID is included with every request so that the server can authenticate the user. If these requests are intercepted (e.g.\ on a public WiFi network) then an attacker can hijack the session and gain access to that user's account using their session ID. Hijacking of session IDs through the interception of requests can be prevented by securing the connection between the client and the server using Transport Layer Security.
\section{\uppercase{Introduction}}\label{sec:introduction}
%Problem description
%Relevance of work / motivation
%Usages
% multiple sensor deployment for:
% active perception
% bin picking
% improve confidence in object recognition
% decide type, number and spatial distribution of sensors
% guide dynamic sensors
%Implementation highlights
% modelling of 4 environments and 8 sensor types within Gazebo
% sensor deployment within rois in environments with high occlusion of target objects
% generation of the segmented sensor point clouds
% color segmentation
% 3d point cloud generation from depth image using the pin hole model
% voxel grid filtering for regular space partition and fast coverage estimation
% quick estimation of the best sensor
% ransac approach to estimate a constellation of sensors
%Difficulties that it overcomes
%Main results
%Paper outline
\noindent Object recognition within environments with large and dynamic occlusions is a challenging task that can be tackled by either deploying an extensive and expensive sensor constellation or by actively moving a set of sensors within the environment in order to maximize the observable surface area of the target objects. This is a combinatorially explosive problem that has a wide range of applications within the active perception domain, such as robust object recognition for bin picking operations, object tracking for robot programming by demonstration, and next best view estimation for object 3D reconstruction, among many others.

With these goals and possible applications in mind, a system was developed for estimating the sensor constellation that maximizes the observable surface area of a given set of target objects within a simulated scene with occluding geometry. The system allows static, dynamic and hybrid constellations of sensors to be estimated in simulation, making it suitable as a decision support system for selecting the type, number and spatial disposition of sensors; as a control support system for guiding moving sensors within dynamic scenes in order to avoid major occlusions and keep tracking the target objects; and also as a recognition support system for improving the confidence of object detection systems (given an initial estimation of the target object and the occluding objects, where should the dynamic sensor move in order to gather additional sensor information that improves the confidence in the object recognition and increases the accuracy of the pose estimation).

The development of the proposed system was split into 4 main stages. In the first stage, the 3D scene geometry of both target and occluding objects was modeled, which was necessary to create the 4 simulation environments targeting active perception and bin picking operations. Then, a set of sensor populations was deployed in each environment within regions of interest in which useful sensor data could be retrieved (given the characteristics and physical constraints of the real sensors). Each population was of a specific sensor type that simulated the main hardware characteristics of commercially available sensors, such as the depth camera resolution, its field of view, range of valid measurements and acquisition rate. The third stage included the generation and analysis of the sensor data for each sensor.
This included the extraction of the target objects' point clouds using color segmentation (target objects had a unique color material that was not affected by lighting effects), followed by the 3D projection of the 2D depth pixels using the pinhole camera model; these points were later transformed into the world coordinate system (for fast merging of data from different sensors) and filtered with a voxel grid. This filtering step was critical to ensure consistent surface area evaluation even when sensors with different image resolutions were observing the same surface area at varying distances. This regular space partition assumes that having many points within a small region does not contribute to better 3D perception, and as such, a given surface cell can be considered observed if it has at least one sensor measurement. This approach also allows the surface area coverage to be computed very efficiently (by simply dividing the number of observed voxels by the number of expected surface voxels).

Finally, in the fourth stage, the best sensor constellation for each testing environment was estimated. If only one sensor was available, the sensor with the best surface coverage was selected. On the other hand, if several sensors could be used, a \gls{ransac} approach was employed to estimate the N sensors that, when their measurements were merged and filtered, achieved the best surface coverage of the target objects.

The following section gives a brief overview of related work developed over the years in the areas of best view estimation and sensor deployment. Later on, \cref{sec:modeling} will present how the 3D testing environments were created, including the sensor modeling and deployment. Then, \cref{sec:best-views-estimation} will introduce the algorithms used to process the sensor data and estimate the best sensor constellation, supported by the experimental evaluation discussed in \cref{sec:results}. Finally, \cref{sec:conclusions} will present the conclusions and give some prospects for future work.
%============================================================================== \chapter{Tables} \label{sec:table} %============================================================================== \LaTeX{} file: \url{./guide_tables.tex}\\[1ex] \noindent You almost certainly have tables with results that you want to include in your thesis. You probably even know about the \Env{tabular} environment, but what about fine-tuning to get the tables exactly in the form that you want? How do you line up the decimal points in a list of cross-sections and/or $\pm$ for the errors? How can you handle a table that goes over more than one page and what if it is so wide that you would like to rotate it by \ang{90}? Also, how can you make your tables look more professional with for example thick and thin lines in appropriate places? This is easy to solve -- use \Macro{toprule}, \Macro{midrule} and \Macro{bottomrule} commands from the \Package{booktabs} package. These commands also produce much better spacing between these lines and the rows above and below. The \Package{booktabs} package gives you some advice on how good tables should look, as well as some guidelines on how to make your tables look better. The \Macro{rule} command is very useful for adding more space between rows. Kopka has several examples of its usage. You can also use \Macro{arraystretch}, e.g.\ \verb+\renewcommand{\arraystretch}{1.5}+ to increase the spacing by \SI{50}{\percent}. This command is often useful if table cells contain subscripts and/or superscripts. \Macro{toprule} etc. mean that it is usually not needed in headers. If you want columns of expandable width you can use the \Package{tabularx} package. The environment \Env{tabular*} has instead expandable intercolumn spacing. Tables usually go inside the \Env{table} environment so their position can \enquote{float}. In this chapter I use some tables inside \Env{table} and some inline. Trying to include footnotes in tables can be tricky. See Section~\ref{sec:layout:footnote} for some guidance on how this can be done. %------------------------------------------------------------------------------ \section{Use of \Macro{phantom}} \label{sec:table:phantom} %------------------------------------------------------------------------------ Although extra packages can help, a very useful command is \Macro{phantom}. This inserts white space corresponding to the width of the argument. Compare the results in the following table with two numbers 0.76 and 83.1: \begin{center} \begin{tabular}{cc|rr} \multicolumn{2}{c|}{Centred} & \multicolumn{2}{c}{Right justified} \\ & Phantom & & Phantom\\\hline 0.76 & \phantom{0}0.76 & 0.76 & 0.76\\ 83.1 & 83.1\phantom{0} & 83.1 & 83.1\phantom{0} \end{tabular} \qquad {\setlength{\extrarowheight}{0.5ex} \begin{tabular}{cc|rr} \multicolumn{2}{c|}{Centred} & \multicolumn{2}{c}{Right justified} \\ & Phantom & & Phantom\\\hline 0.76 & \phantom{0}0.76 & 0.76 & 0.76\\ 83.1 & 83.1\phantom{0} & 83.1 & 83.1\phantom{0} \end{tabular} } \end{center} \par\noindent The difference between the two tables is that the one on the right has the length \Macro{extrarowheight} set to 0.5ex. The first two columns are centred, the last two are right justified. This is clearly a bit clumsy, but it does work! 
%------------------------------------------------------------------------------ \section{Using \Package{siunitx} and the \Option{S} column option} \label{sec:table:siunitx} %------------------------------------------------------------------------------ The \Package{siunitx} package contains some nice tools that make the correct alignment of numbers simpler. The syntax of some of the options changed between version 1 ($\leq 2009$) and version 2 ($\geq 2011$). I discuss the version 2 options in the main text and give the equivalent version 1 options as a footnote. If you look at the \LaTeX\ code, the \TeXLive 2009 version is inside \Macro{ifthenelse\{\textbackslash texlive < 2011\}}, while the \TeXLive 2011 version is in the second block. A first simple example is shown in Table~\ref{tab:viscosity}. In fact I show the table twice, once with the language set to default and once with it set to German. The \Env{tabular} contents are identical, the second tabular is inside a \Macro{foreignlanguage}. Note the use of \Option{table-format}\footnote{\Option{tabformat} in \TeXLive 2009} to centre the temperatures as the heading is wider than the numbers. \begin{table}[htbp] \centering \ifthenelse {\texlive < 2011} {% \begin{tabular}{lS[tabformat = 4.2]S} \toprule Liquid & {Temp.} & {Viscosity $\eta$}\\ & {[\si{\celsius}]} & {(\si{\metre\pascal\second})} \\ \midrule Blood & 37 & 4.0 \\ Glycerine & 0 & 10000 \\ & 20 & 1410 \\ & 60 & 81 \\ Motor oil (SAE 10) & 30 & 200 \\ Water & 0 & 1.8 \\ & 20 & 1.00 \\ & 60 & 0.65 \\ Air & 20 & 0.018 \\ \bottomrule \end{tabular} }{% \begin{tabular}{lS[table-format = 4.2]S} \toprule Liquid & {Temp.} & {Viscosity $\eta$}\\ & {[\si{\celsius}]} & {(\si{\metre\pascal\second})} \\ \midrule Blood & 37 & 4.0 \\ Glycerine & 0 & 10000 \\ & 20 & 1410 \\ & 60 & 81 \\ Motor oil (SAE 10) & 30 & 200 \\ Water & 0 & 1.8 \\ & 20 & 1.00 \\ & 60 & 0.65 \\ Air & 20 & 0.018 \\ \bottomrule \end{tabular} } \quad \ifthenelse {\texlive < 2011} {% \foreignlanguage{ngerman}{% \begin{tabular}{lS[tabformat = 4.2]S} \toprule Flüssigkeit & {Temp.} & {Viskosität $\eta$}\\ & {[\si{\celsius}]} & {(\si{\metre\pascal\second})} \\ \midrule Blut & 37 & 4.0 \\ Glyzerin & 0 & 10000 \\ & 20 & 1410 \\ & 60 & 81 \\ Motoröl (SAE 10) & 30 & 200 \\ Wasser & 0 & 1.8 \\ & 20 & 1.00 \\ & 60 & 0.65 \\ Luft & 20 & 0.018 \\ \bottomrule \end{tabular} } }{% \foreignlanguage{ngerman}{% \begin{tabular}{lS[table-format = 4.2]S} \toprule Flüssigkeit & {Temp.} & {Viskosität $\eta$}\\ & {[\si{\celsius}]} & {(\si{\metre\pascal\second})} \\ \midrule Blut & 37 & 4.0 \\ Glyzerin & 0 & 10000 \\ & 20 & 1410 \\ & 60 & 81 \\ Motoröl (SAE 10) & 30 & 200 \\ Wasser & 0 & 1.8 \\ & 20 & 1.00 \\ & 60 & 0.65 \\ Luft & 20 & 0.018 \\ \bottomrule \end{tabular} } } \caption{A table of viscosities in the default language and German.} \label{tab:viscosity} \end{table} Table~\ref{tab:xsect0} shows a more complicated table set using the tools. You can either enclose all numbers in \Macro{num} or use the \Option{S} column descriptor. If you use \Option{S}, note that it usually centres the contents of the column. You can use the \Option{table-number-alignment}\footnote{\Option{tabnumalign} in \TeXLive 2009} option to change this. \Option{S} and \Macro{num} cannot generally be mixed in a single column though. If you want to use \Macro{num} in an \Option{S} column you have to enclose it in braces. You also need to do this with regular text, such as column headings -- see Table~\ref{tab:viscosity}. 
\begin{table}[htbp] \centering \renewcommand{\arraystretch}{1.2} \ifthenelse {\texlive < 2011} {% \begin{tabular}{% r@{\,:\,}S[tabnumalign=centredecimal]% r@{\,}c@{\,}l@{\,}l% S[tabformat=5.0]@{\,}l% l} \toprule \multicolumn{2}{c}{\etajet} & \multicolumn{4}{c}{\diffetab} & \multicolumn{2}{c}{\diffnloetab} & \Cbhad \\ \multicolumn{2}{c}{} & \multicolumn{4}{c}{[\si{\pico\barn}]} & \multicolumn{2}{c}{[\si{\pico\barn\per\GeV}]} & \\ \midrule \num{-1.6} & -1.1 & \num[dp=1]{57.4} & $\pm$ & \num[dp=1]{9.4} & $^{+13}_{ -3}$ & 72 & $^{+22}_{-13}$ & \num[dp=2]{0.704} \\ \num{-1.1} & -0.8 & \num[dp=0]{121.3} & $\pm$ & \num[dp=0]{21.1} & $^{+16}_{-16}$ & 182 & $^{+50}_{-30}$ & \num[dp=2]{0.781} \\ \num{-0.8} & -0.5 & \num[dp=0]{214.1} & $\pm$ & \num[dp=0]{21.9} & $^{+22}_{-12}$ & 255 & $^{+69}_{-42}$ & \num[dp=2]{0.791} \\ \num{-0.5} & -0.2 & \num[dp=0]{232.6} & $\pm$ & \num[dp=0]{21.0} & $^{+28}_{-21}$ & 307 & $^{+83}_{-50}$ & \num[dp=2]{0.791} \\ \num{-0.2} & +0.1 & \num[dp=0]{264.1} & $\pm$ & \num[dp=0]{22.0} & $^{+28}_{-23}$ & 342 & $^{+91}_{-55}$ & \num[dp=2]{0.808} \\ \num{+0.1} & +0.5 & \num[dp=0]{316.0} & $\pm$ & \num[dp=0]{21.1} & $^{+23}_{-17}$ & 346 & $^{+96}_{-57}$ & \num[dp=2]{0.847} \\ \num{+0.5} & +1.4 & \num[dp=0]{288.1} & $\pm$ & \num[dp=0]{15.4} & $^{+20}_{-30}$ & 265 & $^{+82}_{-48}$ & \num[dp=2]{0.926} \\ \bottomrule \end{tabular} }{% \sisetup{round-mode = places} \begin{tabular}{% r@{\,:\,}S[table-number-alignment=center-decimal-marker]% r@{\,}c@{\,}l@{\,}l% S[table-format=5.0]@{\,}l% l} \toprule \multicolumn{2}{c}{\etajet} & \multicolumn{4}{c}{\diffetab} & \multicolumn{2}{c}{\diffnloetab} & \Cbhad \\ \multicolumn{2}{c}{} & \multicolumn{4}{c}{[\si{\pico\barn}]} & \multicolumn{2}{c}{[\si{\pico\barn\per\GeV}]} & \\ \midrule \num{-1.6} & -1.1 & \num[round-precision=1]{57.4} & $\pm$ & \num[round-precision=1]{9.4} & $^{+13}_{-3}$ & 72 & $^{+22}_{-13}$ & \num[round-precision=2]{0.704} \\ \num{-1.1} & -0.8 & \num[round-precision=0]{121.3} & $\pm$ & \num[round-precision=0]{21.1} & $^{+16}_{-16}$ & 182 & $^{+50}_{-30}$ & \num[round-precision=2]{0.781} \\ \num{-0.8} & -0.5 & \num[round-precision=0]{214.1} & $\pm$ & \num[round-precision=0]{21.9} & $^{+22}_{-12}$ & 255 & $^{+69}_{-42}$ & \num[round-precision=2]{0.791} \\ \num{-0.5} & -0.2 & \num[round-precision=0]{232.6} & $\pm$ & \num[round-precision=0]{21.0} & $^{+28}_{-21}$ & 307 & $^{+83}_{-50}$ & \num[round-precision=2]{0.791} \\ \num{-0.2} & +0.1 & \num[round-precision=0]{264.1} & $\pm$ & \num[round-precision=0]{22.0} & $^{+28}_{-23}$ & 342 & $^{+91}_{-55}$ & \num[round-precision=2]{0.808} \\ \num{+0.1} & +0.5 & \num[round-precision=0]{316.0} & $\pm$ & \num[round-precision=0]{21.1} & $^{+23}_{-17}$ & 346 & $^{+96}_{-57}$ & \num[round-precision=2]{0.847} \\ \num{+0.5} & +1.4 & \num[round-precision=0]{288.1} & $\pm$ & \num[round-precision=0]{15.4} & $^{+20}_{-30}$ & 265 & $^{+82}_{-48}$ & \num[round-precision=2]{0.926} \\ \bottomrule \end{tabular} } \caption{A selection of cross-section measurements!} \label{tab:xsect0} \end{table} With \Macro{num} you can also specify the precision with which each number is shown separately. With \Option{S} you specify the format for the whole column. With \Package{siunitx} version 1 this is done using the \Option{dp} option and gives the number of decimal places for the rounding. With \Package{siunitx} version 2 you should also make sure that you specify the rounding mode in the preamble of the table (usually \Option{figures} or \Option{places} -- you can also choose \Option{off}). 
If you know that you are going to make such a table, it is very easy to use either
of these options to write it out in this format using a program.
Using \Macro{num} solves the very common problem of your program writing out the results
with too many significant digits, which you then have to correct by hand later
(as well as every time they get updated)!
A slightly different approach that one could follow is to use \Option{S} for simple numbers
in tables and \Macro{num} for more complicated number typesetting.
For asymmetric errors you could consider defining something similar to \Macro{numpmerr},
which internally uses \Macro{num}.

A common and closely related problem is that your analysis spits out a result such as
$24.36789^{+0.36423}_{-0.45236}$. You copy and paste this into a table and then a referee
(or your supervisor) complains that you clearly don't understand statistics, as you should
never quote an error to 5 significant digits. You can go ahead and edit all the numbers by
hand, but what do you do if you rerun your analysis and all the numbers change?
Reformatting by hand is then an error-prone and lengthy process. As discussed above, you can
either use options such as \Option{round-precision}\footnote{\Option{dp} in \TeXLive 2009}
in the \Option{S} command or use the macro \Macro{num} and
\Option{dp}/\Option{round-precision} to do the rounding\index{rounding} for you.
Table~\ref{tab:rounding} shows and compares two different approaches to how this can be done,
even for asymmetric errors. While the form may appear to be a bit clumsy at first, it is easy
enough to get your program to write out the lines.
In \Package{siunitx} version 1 you should use the option \Option{dp} for the rounding.
In version 2 you should use the options \Option{round-mode} and \Option{round-precision}.
In the first line of the left-hand part of the table I show what to do if you need to change
the precision of a single number. As you can see this is rather trivial. However, the
alignment on the decimal point is then no longer perfect. While this is probably OK for
internal notes etc., theses or papers (should) have tougher requirements.
Another way of achieving the same thing and avoiding the use of \Option{round-mode} and
\Option{round-precision}\footnote{\Option{dp} in \TeXLive 2009. The \Option{round-mode}
should be set in the preamble of the table and not for every number.} is shown in the right
half. Note the use of options for the \Option{S} command and the use of \Macro{num} enclosed
in braces to format the row that requires a different precision.
It takes a while to learn what the different options mean and their consequences.
I hope that these examples cover most problems and at least give ideas as to what is possible.
In particular the solution on the right-hand side of Table~\ref{tab:rounding} is very nice,
as all numbers except the one that requires an extra digit are written without any special
formatting!
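As an illustration of letting a program write out such lines, here is a small Python sketch
(my own example; the file name and the row layout are just assumptions) that produces rows in
the plain format used on the right-hand side of Table~\ref{tab:rounding}, leaving all of the
rounding to the \Option{S} columns of \Package{siunitx}:
\begin{verbatim}
# Write LaTeX table rows and let siunitx's S columns do the rounding.
rows = [
    (-1.6, -1.1, 0.574, 0.094, +0.035, -0.031),
    (-1.1, -0.8, 1.213, 0.211, +0.162, -0.162),
]
with open("xsect_rows.tex", "w") as f:
    for lo, hi, val, stat, up, dn in rows:
        f.write(f"{lo} & {hi} & {val} & {stat} & "
                f"$^{{\\num{{{up:+}}}}}_{{\\num{{{dn}}}}}$ \\\\\n")
\end{verbatim}
The generated file can then be included with \Macro{input}, so that rerunning the analysis
simply regenerates the rows without any hand editing.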
\begin{table}[htbp] \centering \renewcommand{\arraystretch}{1.2} \ifthenelse {\texlive < 2011} {% \sisetup{retainplus} \begin{tabular}{% S@{\,:\,}S r@{\,}@{$\pm$}@{\,}l@{\,}l } \toprule \multicolumn{2}{c}{\etajet} & \multicolumn{3}{c}{\diffetab} \\ \multicolumn{2}{c}{} & \multicolumn{3}{c}{[\si{\pico\barn}]} \\ \midrule {\num{-1.6}} & -1.1 & \num[dp=3]{0.574} & \num[dp=3]{0.094} & $^{\num[dp=3]{+0.131}}_{\num[dp=3]{-0.031}}$ \\ {\num{-1.1}} & -0.8 & \num[dp=2]{1.213} & \num[dp=2]{0.211} & $^{\num[dp=2]{+0.162}}_{\num[dp=2]{-0.162}}$ \\ {\num{-0.8}} & -0.5 & \num[dp=2]{2.141} & \num[dp=2]{0.219} & $^{\num[dp=2]{+0.223}}_{\num[dp=2]{-0.123}}$ \\ {\num{-0.5}} & -0.2 & \num[dp=2]{2.326} & \num[dp=2]{0.210} & $^{\num[dp=2]{+0.284}}_{\num[dp=2]{-0.214}}$ \\ {\num{-0.2}} & +0.1 & \num[dp=2]{2.641} & \num[dp=2]{0.220} & $^{\num[dp=2]{+0.283}}_{\num[dp=2]{-0.233}}$ \\ {\num{+0.1}} & +0.5 & \num[dp=2]{3.160} & \num[dp=2]{0.211} & $^{\num[dp=2]{+0.232}}_{\num[dp=2]{-0.172}}$ \\ {\num{+0.5}} & +1.4 & \num[dp=2]{2.881} & \num[dp=2]{0.154} & $^{\num[dp=2]{+0.201}}_{\num[dp=2]{-0.301}}$ \\ \bottomrule \end{tabular} }{% \sisetup{retain-explicit-plus} \sisetup{round-mode = places} \begin{tabular}{% S@{\,:\,}S r@{\,}@{$\pm$}@{\,}l@{\,}l } \toprule \multicolumn{2}{c}{\etajet} & \multicolumn{3}{c}{\diffetab} \\ \multicolumn{2}{c}{} & \multicolumn{3}{c}{[\si{\pico\barn}]} \\ \midrule {\num{-1.6}} & -1.1 & \num[round-precision=3]{0.574} & \num[round-precision=3]{0.094} & $^{\num[round-precision=3]{+0.035}}_{\num[round-precision=3]{-0.031}}$ \\ {\num{-1.1}} & -0.8 & \num[round-precision=2]{1.213} & \num[round-precision=2]{0.211} & $^{\num[round-precision=2]{+0.162}}_{\num[round-precision=2]{-0.162}}$ \\ {\num{-0.8}} & -0.5 & \num[round-precision=2]{2.141} & \num[round-precision=2]{0.219} & $^{\num[round-precision=2]{+0.223}}_{\num[round-precision=2]{-0.123}}$ \\ {\num{-0.5}} & -0.2 & \num[round-precision=2]{2.326} & \num[round-precision=2]{0.210} & $^{\num[round-precision=2]{+0.284}}_{\num[round-precision=2]{-0.214}}$ \\ {\num{-0.2}} & +0.1 & \num[round-precision=2]{2.641} & \num[round-precision=2]{0.220} & $^{\num[round-precision=2]{+0.283}}_{\num[round-precision=2]{-0.233}}$ \\ {\num{+0.1}} & +0.5 & \num[round-precision=2]{3.160} & \num[round-precision=2]{0.211} & $^{\num[round-precision=2]{+0.232}}_{\num[round-precision=2]{-0.172}}$ \\ {\num{+0.5}} & +1.4 & \num[round-precision=2]{2.881} & \num[round-precision=2]{0.154} & $^{\num[round-precision=2]{+0.201}}_{\num[round-precision=2]{-0.301}}$ \\ \bottomrule \end{tabular} } \quad \ifthenelse {\texlive < 2011} {% \sisetup{dp = 2} \begin{tabular}{% S[tabformat=3.2, tabnumalign = right]@{\,:\,}S S[dp = 2, tabformat = 1.3, tabnumalign = right] @{$\,\pm\,$} S[dp = 2, tabformat = 1.3, tabnumalign = left] @{\,}l } \toprule \multicolumn{2}{c}{\etajet} & \multicolumn{3}{c}{\diffetab} \\ \multicolumn{2}{c}{} & \multicolumn{3}{c}{[\si{\pico\barn}]} \\ \midrule -1.6 & -1.1 & {\num[dp=3]{0.574}} & {\num[dp=3]{0.094}} & $^{\num[dp=3]{+0.035}}_{\num[dp=3]{-0.031}}$ \\ -1.1 & -0.8 & 1.213 & 0.211 & $^{\num{+0.162}}_{\num{-0.162}}$ \\ -0.8 & -0.5 & 2.141 & 0.219 & $^{\num{+0.223}}_{\num{-0.123}}$ \\ -0.5 & -0.2 & 2.326 & 0.210 & $^{\num{+0.284}}_{\num{-0.214}}$ \\ -0.2 & +0.1 & 2.641 & 0.220 & $^{\num{+0.283}}_{\num{-0.233}}$ \\ +0.1 & +0.5 & 3.160 & 0.211 & $^{\num{+0.232}}_{\num{-0.172}}$ \\ +0.5 & +1.4 & 2.881 & 0.154 & $^{\num{+0.201}}_{\num{-0.301}}$ \\ \bottomrule \end{tabular} }{% \sisetup{round-mode = places, round-precision = 2} \begin{tabular}{% S[table-format=3.2, 
table-number-alignment = right]@{\,:\,}S S[round-mode = places, round-precision = 2, table-format = 1.3, table-number-alignment = right] @{$\,\pm\,$} S[round-mode = places, round-precision = 2, table-format = 1.3, table-number-alignment = left] @{\,}l } \toprule \multicolumn{2}{c}{\etajet} & \multicolumn{3}{c}{\diffetab} \\ \multicolumn{2}{c}{} & \multicolumn{3}{c}{[\si{\pico\barn}]} \\ \midrule -1.6 & -1.1 & {\num[round-precision=3]{0.574}} & {\num[round-precision=3]{0.094}} & $^{\num[round-precision=3]{+0.035}}_{\num[round-precision=3]{-0.031}}$ \\ -1.1 & -0.8 & 1.213 & 0.211 & $^{\num{+0.162}}_{\num{-0.162}}$ \\ -0.8 & -0.5 & 2.141 & 0.219 & $^{\num{+0.223}}_{\num{-0.123}}$ \\ -0.5 & -0.2 & 2.326 & 0.210 & $^{\num{+0.284}}_{\num{-0.214}}$ \\ -0.2 & +0.1 & 2.641 & 0.220 & $^{\num{+0.283}}_{\num{-0.233}}$ \\ +0.1 & +0.5 & 3.160 & 0.211 & $^{\num{+0.232}}_{\num{-0.172}}$ \\ +0.5 & +1.4 & 2.881 & 0.154 & $^{\num{+0.201}}_{\num{-0.301}}$ \\ \bottomrule \end{tabular} } \caption{Another selection of cross-section measurements! Note the use of \Macro{sisetup} to keep the plus signs on the positive errors.} \label{tab:rounding} \end{table} Another example using \Package{siunitx} tools that contains a similar problem is: \begin{center} \ifthenelse {\texlive < 2011} {% \begin{tabular}{% S[tabnumalign=centerdecimal]@{$\,\pm$}S[tabnumalign=centerdecimal]|% c% S[decimalsymbol=comma]@{$\,\pm\!\!$}S[decimalsymbol=comma]} \toprule \multicolumn{2}{c|}{English} & \multicolumn{1}{c}{German1} & \multicolumn{2}{c}{German2} \\ {Value} & {Error} & {Wert} & \multicolumn{2}{c}{Messung}\\ \midrule 0.76 & 0.14 & \num[decimalsymbol=comma]{0.89(16)} & 0.89 & 0.16\\ 83.1 & 7.6 & \num[decimalsymbol=comma]{94.2(83)} & 94.2 & 8.3\\ \bottomrule \end{tabular} }{% \begin{tabular}{% S[table-number-alignment=center-decimal-marker]@{$\,\pm$}S[table-number-alignment=center-decimal-marker]|% c% S[output-decimal-marker={,}]@{$\,\pm\!\!$}S[output-decimal-marker={,}]} \toprule \multicolumn{2}{c|}{English} & \multicolumn{1}{c}{German1} & \multicolumn{2}{c}{German2} \\ {Value} & {Error} & {Wert} & \multicolumn{2}{c}{Messung}\\ \midrule 0.76 & 0.14 & \num[output-decimal-marker={,}]{0.89(16)} & 0.89 & 0.16\\ 83.1 & 7.6 & \num[output-decimal-marker={,}]{94.2(83)} & 94.2 & 8.3\\ \bottomrule \end{tabular} } \end{center} As you can see, the \enquote{English} column formats things nicely using the \Option{S} column descriptor. The \enquote{German1} column successfully converts the decimal point to a comma and also the parentheses with the error to $\pm$. However, the alignment of the numbers is now messed up. The \enquote{German2} column looks better. I had to do some dirty tricks with the formatting of the intercolumn separator \verb+@{\,\pm\!\!}+ to get the spacing nice! This confirms my statement above that the \Option{S} format is most useful for aligning simple numbers easily, while \Macro{num} is very nice for rounding to a given precision -- note that you can use either the \Option{dp} or \Option{sf} options to achieve what you want. %------------------------------------------------------------------------------ \section{Using \Package{dcolumn}} \label{sec:tab:dcolumn} %------------------------------------------------------------------------------ An alternative is the \Package{dcolumn} package. 
You can also use this package to convert numbers written with \enquote{.} as the decimal point into German-style numbers with \enquote{,}.\footnote{See Section~\protect\ref{sec:layout:german} for hints on how to get around problems with the \Package{ziffer} package} You can line up measurements and errors by putting each of them in its own column. If your errors are symmetric you can put $\pm$ as the intercolumn separator: \begin{center} \begin{tabular}{cD{.}{.}{3}|cD{.}{,}{2}|rl|D{.}{.}{2}@{$\,\pm\,$}D{.}{.}{2}} \multicolumn{2}{c|}{English} & \multicolumn{2}{c|}{German} & \multicolumn{2}{c|}{Val $\pm$ Err} & \multicolumn{1}{c}{Val} & \multicolumn{1}{l}{Err}\\ \midrule 0.76 & 0.76 & 0,76 & 0.76 & 0.76 & $\pm$ 0.14 & 0.76 & 0.04\\ 83.1 & 83.1 & 83,1 & 83.1 & 83.1 & $\pm$ 4.2 & 83.1 & 4.2 \end{tabular} \end{center} Table~\ref{tab:xsect1} shows quite a complicated table set in 2 different ways. It is rotated by \SI{90}{\degree} to illustrate how that can be done. \begin{table}[htbp] \begin{sideways} \centering \begin{tabular}{r@{ : }l|c|c|c} \toprule \multicolumn{2}{c|}{\pTjet} & \diffptb & \diffnloptb & \Cbhad \\ \multicolumn{2}{c|}{(GeV)} & (pb/GeV) & (pb/GeV) & \\ \midrule $\phantom{1}$6 & 11 & $95.6\phantom{2}\pm 4.9\phantom{4}^{+9.8\phantom{2}}_{-7.0\phantom{2}}$ & $109\phantom{.22}^{+31\phantom{.22}}_{-19\phantom{.22}}$ & 0.83 \\ 11 & 16 & $24.8\phantom{2}\pm 1.2\phantom{4}^{+1.8\phantom{2}}_{-1.4\phantom{2}}$ & $\phantom{1}29.1\phantom{2}^{+\phantom{1}7.9\phantom{2}}_{-\phantom{1}4.7\phantom{2}}$ & 0.89 \\ 16 & 21 & $\phantom{2}6.02\pm 0.49^{+0.6\phantom{2}}_{-0.6\phantom{2}}$ & $\phantom{10}7.1\phantom{2}^{+\phantom{1}2.0\phantom{2}}_{-\phantom{1}1.2\phantom{2}}$ & 0.92 \\ 21 & 27 & $\phantom{2}0.93\pm 0.22^{+0.31}_{-0.20}$ & $\phantom{10}1.87^{+\phantom{1}0.54}_{-\phantom{1}0.34}$ & 0.95 \\ 27 & 35 & $\phantom{2}0.30\pm 0.12^{+0.14}_{-0.12}$ & $\phantom{10}0.46^{+\phantom{1}0.13}_{-\phantom{1}0.08}$ & 1.05 \\ \bottomrule \multicolumn{5}{c}{}\\ \toprule \multicolumn{2}{c|}{\etajet} & \diffetab & \diffnloetab & \Cbhad \\ \multicolumn{2}{c|}{} & (pb) & (pb) & \\ \midrule $-1.6$ & $-1.1$ & $\phantom{2}57\pm 22^{+13}_{-\phantom{1}3}$ & $\phantom{1}72^{+22}_{-13}$ & 0.70 \\ $-1.1$ & $-0.8$ & $121\pm 21^{+16}_{-16}$ & $182^{+50}_{-30}$ & 0.78 \\ $-0.8$ & $-0.5$ & $214\pm 22^{+22}_{-12}$ & $255^{+69}_{-42}$ & 0.79 \\ $-0.5$ & $-0.2$ & $233\pm 21^{+28}_{-21}$ & $307^{+83}_{-50}$ & 0.79 \\ $-0.2$ & $\phantom{-}0.1$ & $264\pm 22^{+28}_{-23}$ & $342^{+91}_{-55}$ & 0.81 \\ $\phantom{-}0.1$ & $\phantom{-}0.5$ & $316\pm 21^{+23}_{-17}$ & $346^{+96}_{-57}$ & 0.86 \\ $\phantom{-}0.5$ & $\phantom{-}1.4$ & $288\pm 15^{+20}_{-30}$ & $265^{+82}_{-48}$ & 0.93 \\ \bottomrule \end{tabular} \qquad \(\begin{array}{r@{\,:\,}l|D{.}{.}{2}@{\,}r@{}D{.}{.}{2}@{\,}l|D{.}{.}{2}@{\,}l|c} \toprule \multicolumn{2}{c|}{\pTjet} & \multicolumn{4}{c|}{\diffptb} & \multicolumn{2}{c|}{\diffnloptb} & \Cbhad \\ \multicolumn{2}{c|}{[\si{\GeV}]} & \multicolumn{4}{c|}{[\si{\pico\barn\per\GeV}]} & \multicolumn{2}{c|}{[\si{\pico\barn\per\GeV}]} & \\ \midrule 6 & 11 & 95.6 & \pm & 4.9 & ^{+9.8}_{-7.0} & 109 & ^{+31}_{-19} & 0.83 \\ 11 & 16 & 24.8 & \pm & 1.2 & ^{+1.8}_{-1.4} & 29.1 & ^{+7.9}_{-4.7} & 0.89 \\ 16 & 21 & 6.02 & \pm & 0.49 & ^{+0.6}_{-0.6} & 7.1 & ^{+2.0}_{-1.2} & 0.92 \\ 21 & 27 & 0.93 & \pm & 0.22 & ^{+0.31}_{-0.20} & 1.87 & ^{+0.54}_{-0.34} & 0.95 \\ 27 & 35 & 0.30 & \pm & 0.12 & ^{+0.14}_{-0.12} & 0.46 & ^{+0.13}_{-0.08} & 1.05 \\ \bottomrule \multicolumn{9}{c}{}\\ \toprule \multicolumn{2}{c|}{\etajet} & 
\multicolumn{4}{c|}{\diffetab} &
      \multicolumn{2}{c|}{\diffnloetab} & \Cbhad \\
    \multicolumn{2}{c|}{} & \multicolumn{4}{c|}{[\si{\pico\barn}]} &
      \multicolumn{2}{c|}{[\si{\pico\barn}]} & \\
    \midrule
    -1.6 & -1.1 & 57  & \pm & 22 & ^{+13}_{-3}  & 72  & ^{+22}_{-13} & 0.70 \\
    -1.1 & -0.8 & 121 & \pm & 21 & ^{+16}_{-16} & 182 & ^{+50}_{-30} & 0.78 \\
    -0.8 & -0.5 & 214 & \pm & 22 & ^{+22}_{-12} & 255 & ^{+69}_{-42} & 0.79 \\
    -0.5 & -0.2 & 233 & \pm & 21 & ^{+28}_{-21} & 307 & ^{+83}_{-50} & 0.79 \\
    -0.2 & +0.1 & 264 & \pm & 22 & ^{+28}_{-23} & 342 & ^{+91}_{-55} & 0.81 \\
    +0.1 & +0.5 & 316 & \pm & 21 & ^{+23}_{-17} & 346 & ^{+96}_{-57} & 0.86 \\
    +0.5 & +1.4 & 288 & \pm & 15 & ^{+20}_{-30} & 265 & ^{+82}_{-48} & 0.93 \\
    \bottomrule
  \end{array}\)
  \end{sideways}
  \caption{Cross-section measurements!}
  \label{tab:xsect1}
\end{table}

The second version (right) is certainly simpler to typeset and does not really use any tricks
to line things up. Note the use of \Env{array} rather than \Env{tabular}, which means that the
contents are typeset in math mode rather than text mode. For tables of numbers this is often
preferred. You just have to enclose the array in \texttt{\textbackslash(...\textbackslash)} or
\BegEnv{math}...\EndEnv{math}.
Close inspection of the right-hand table shows that it is, however, not perfect.
It is questionable whether one wants to write $+0.5$ or just $0.5$.
The fact that both the \pT and the $\eta$ cross-sections are in a single tabular, although
their numerical values are so different, makes it difficult to line things up perfectly.
An alternative, which uses the headers to fix the width of the columns, is given in
Table~\ref{tab:xsect2}. Note that this uses \Env{sidewaystable} rather than \Env{sideways}
inside \Env{table}, which also rotates the caption.
\begin{sidewaystable} \centering \renewcommand{\arraystretch}{1.2} \begin{math} \begin{array}{D{.}{.}{4.0}@{\,:}D{.}{.}{3.1}|D{.}{.}{2}@{\,}r@{}D{.}{.}{2}@{\,}l|D{.}{.}{2}@{\,}l|c} \toprule \multicolumn{2}{p{2.0cm}|}{\rule[-1.5ex]{0pt}{4.0ex}\centering\pTjet} & \multicolumn{4}{p{3.0cm}|}{\centering\diffptb} & \multicolumn{2}{p{2.2cm}|}{\centering\diffnloptb} & \multicolumn{1}{p{1.5cm}}{\centering\Cbhad} \\ \multicolumn{2}{c|}{[\si{\GeV}]} & \multicolumn{4}{c|}{[\si{\pico\barn\per\GeV}]} & \multicolumn{2}{c|}{[\si{\pico\barn\per\GeV}]} & \\ \midrule 6 & 11 & 95.6 & \pm & 4.9 & ^{+9.8}_{-7.0} & 109 & ^{+31}_{-19} & 0.83 \\ 11 & 16 & 24.8 & \pm & 1.2 & ^{+1.8}_{-1.4} & 29.1 & ^{+7.9}_{-4.7} & 0.89 \\ 16 & 21 & 6.02 & \pm & 0.49 & ^{+0.6}_{-0.6} & 7.1 & ^{+2.0}_{-1.2} & 0.92 \\ 21 & 27 & 0.93 & \pm & 0.22 & ^{+0.31}_{-0.20} & 1.87 & ^{+0.54}_{-0.34} & 0.95 \\ 27 & 35 & 0.30 & \pm & 0.12 & ^{+0.14}_{-0.12} & 0.46 & ^{+0.13}_{-0.08} & 1.05 \\ \bottomrule \end{array} \end{math} \vspace*{2ex} \begin{math} \begin{array}{D{.}{,}{1}@{\,:}D{.}{,}{1}|D{.}{,}{5.0}@{\,}r@{}D{.}{,}{0}@{\,}l|D{.}{,}{5.0}@{\,}l|D{.}{,}{4}} \toprule \multicolumn{2}{p{2.0cm}|}{\rule[-1.5ex]{0pt}{4.0ex}\centering\etajet} & \multicolumn{4}{p{3.0cm}|}{\centering\diffetab} & \multicolumn{2}{p{2.2cm}|}{\centering\diffnloetab} & \multicolumn{1}{p{1.5cm}}{\centering\Cbhad} \\ \multicolumn{2}{c|}{} & \multicolumn{4}{c|}{[\si{\pico\barn}]} & \multicolumn{2}{c|}{[\si{\pico\barn}]} & \\ \midrule -1.6 & -1.1 & 57 & \pm & 22 & ^{+13}_{-3} & 72 & ^{+22}_{-13} & 0.70 \\ -1.1 & -0.8 & 121 & \pm & 21 & ^{+16}_{-16} & 182 & ^{+50}_{-30} & 0.78 \\ -0.8 & -0.5 & 214 & \pm & 22 & ^{+22}_{-12} & 255 & ^{+69}_{-42} & 0.79 \\ -0.5 & -0.2 & 233 & \pm & 21 & ^{+28}_{-21} & 307 & ^{+83}_{-50} & 0.79 \\ -0.2 & 0.1 & 264 & \pm & 22 & ^{+28}_{-23} & 342 & ^{+91}_{-55} & 0.81 \\ 0.1 & 0.5 & 316 & \pm & 21 & ^{+23}_{-17} & 346 & ^{+96}_{-57} & 0.86 \\ 0.5 & 1.4 & 288 & \pm & 15 & ^{+20}_{-30} & 265 & ^{+82}_{-48} & 0.93 \\ \bottomrule \end{array} \end{math} \caption[Cross-sections using \Env{sidewaystable}, which also rotates the caption.]{Cross-sections using \Env{sidewaystable}, which also rotates the caption. Just for fun the numbers indicating the $\eta$ range of the bins in the lower half have been converted to German format! Note also the dirty trick used to get the \Cbhad values nicely in the centre of the column.} \label{tab:xsect2} \end{sidewaystable} This version of the table also adds a few extra bells and whistles. It uses a \Macro{rule} of zero width to give a bit more space above and below the cross-sections. \texttt{p\{...\}} switches to paragraph mode, so \Macro{centering} is needed to get centred headers. It adds a bit more space between the rows using \Macro{arraystretch}. You have to play around a bit with the column widths. If you set one of them too small it gets expanded anyway, so the two parts of the table would not line up. Just for fun the bottom half of the table uses \enquote{,} instead of \enquote{.} for the decimal point! Admittedly the header is a bit complicated, but the numbers are nice and easy to write! %%% Local Variables: %%% mode: latex %%% TeX-master: "./thesis_guide" %%% End:
%%%%% Point Pillar Lidar Deep Learning Face Finding %%%%%%
%%%%% Karissa Stisser, Zach Larson, Naseem Alfaqueh, Anjali Patil, Adrian Lindsey, Omar Rizk
\documentclass{article}
\usepackage[utf8]{inputenc}
\usepackage{graphicx}

\title{Point Pillar Lidar Deep Learning Face Finding}
\author{Karissa Stisser, Zach Larson, Naseem Alfaqueh\\ \\Anjali Patil, Adrian Lindsey, Omar Rizk }
\date{April 2021- https://github.com/kstisser/DeepLearningPointCloud}

\begin{document}

\maketitle

\section{Introduction}
We have created a system that uses deep learning to recognize a face in time-series point cloud data in real time for surgical applications. As this use case is very specific, we focused on leveraging knowledge of the space and procedure to optimize the output rather than trying to create a solution for all scenarios. We built a preprocessing pipeline that eliminates unnecessary points, leveraged the point pillars deep learning architecture\cite{pointpillars}, and modified the output to give a point-by-point decision on whether each point belongs to the face. Development was done in two stages. First, separate pieces were developed in Jupyter notebooks. Second, those notebooks were integrated into the final product. Many, but not all, components made it to the second stage.

\section{Data Augmentation}
As we started with 7 point clouds, we developed a data augmentation plan, applied to both the training and test data, that involves background mixing, translation, rotation, scaling, and flipping, as we would like the trained algorithm to be more robust to camera placement in relation to the face. We only modified the x and y directions, as modifications in the z direction would actually add and remove points, which we were not confident we could augment accurately.

We start by matching each face with the background points of each of the other point clouds. If we have n point clouds, this creates an additional n choose 2 combinations of point clouds. For each of these background-mixed point clouds we added translations in a spiral pattern, starting from the face center and moving outwards through 30 different locations. We increment the angle by $4\pi/30$ at each step to ensure two spiral loops, and use the following equations for the face center placement:
\[rt = e^{0.306 \cdot angle}\]
\[x = rt \cdot \cos(angle) + originalX\]
\[y = rt \cdot \sin(angle) + originalY\]
At each translation we rotate the face 10 times in addition to the original, which we consider 0 degrees, spacing the other 10 rotations evenly between 0 and 360 degrees. We scale by ${2\%}$, ${4\%}$, and ${6\%}$, both larger and smaller, to make an additional 6 point clouds, keeping the face a reasonable size within the anticipated space. Because we are using random downsampling, the spacing between points at these scales is not a concern, as the downsampling itself can randomly increase or decrease the proximity of points. Finally, we flipped the face in each of the scaled point clouds, doubling the final count, which is:
\[ 3600 \cdot \frac{n!}{2!(n-2)!} + n \]
where $n$ is the number of original point clouds. As we started with 7 point clouds, we are able to produce a total of 151,207 point clouds. As all 7 point clouds were of the same face, the next step would be to collect data of different faces with variation in age, size, race, and distinctive features.
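The following short Python sketch (our own illustration, not part of the released code) shows how the spiral placement equations above can be used to generate the 30 candidate face-center translations; the function name and the step ordering are assumptions made for the example:
\begin{verbatim}
import numpy as np

def spiral_centers(original_x, original_y, n_steps=30):
    """Candidate face-centre translations along an outward spiral.

    The angle is incremented by 4*pi/30 per step (two full loops over
    30 steps) and the radius grows as exp(0.306 * angle), following
    the placement equations in the text.
    """
    centers = []
    angle = 0.0
    for _ in range(n_steps):
        angle += 4 * np.pi / n_steps
        rt = np.exp(0.306 * angle)
        centers.append((rt * np.cos(angle) + original_x,
                        rt * np.sin(angle) + original_y))
    return centers
\end{verbatim}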
\section{Data Preprocessing}
\begin{figure}[htp]
\centering
\includegraphics[scale=0.4]{PreprocessingPipeline.jpg}
\caption{Preprocessing Pipeline}
\label{fig:preprocessingPipeline}
\end{figure}
Because computational speed does not allow us to use all points in the given point cloud, we added preprocessing steps to eliminate unnecessary points. As this problem is meant for a specific application with specific hardware, we were able to make certain assumptions. Because we were told there will be a table or floor under the patient being operated on, we begin our algorithm with a RANSAC plane finder to eliminate all points lying within a reasonable plane. We then randomly downsample to 1000 points.

Next, we developed a clustering architecture which consists of a Clusterer and a Cluster Profiler. The Clusterer starts by running DBSCAN with eps = 0.05 and a minimum of 10 samples per cluster. The Cluster Profiler maintains profiles of faces using the number of points in the cluster, the width of the cluster, and the height of the cluster. We store an ideal face (an average of our downsampled data) with 200 points, 0.1907 meters width, and 0.2487 meters height, against which a score is computed for each incoming cluster. Our score calculation is:
\[ numPointsScore= \frac{(averageFace.numPoints - incomingFace.numPoints)}{0.5 * (maxFace.numPoints - minFace.numPoints)} \]
\[ widthScore= \frac{(averageFace.width - incomingFace.width)}{0.5 * (maxFace.width - minFace.width)} \]
\[ heightScore= \frac{(averageFace.height - incomingFace.height)}{0.5 * (maxFace.height - minFace.height)} \]
\[ score = numPointsScore + widthScore + heightScore \]
We also maintain a profile for a minimum face with 100 points, 0.1 meters width, and 0.2 meters height, and a maximum face with 300 points, 0.4 meters width, and 0.6 meters height. If the incoming cluster has any value lower than the minimum face or larger than the maximum face, it is given a score of -1 and is eliminated. If the cluster is not eliminated by this, its score relative to the average face is computed, and if the score is above the threshold of 0.7, it is kept for further parallel evaluation in the machine learning pipeline.

\begin{figure}[htp]
\centering
\includegraphics[scale=0.8]{pointPillars.png}
\caption{PointPillars}
\label{fig:pointPillars}
\end{figure}

While the clustering seems to work well, the parameters are not yet consistent enough to send each cluster through the model, which was our goal. So, we currently send one large cluster of all the data through an embedding to become point pillars\cite{pointpillars}. However, we use 8 feature variables, not including the reflectance value, as it is not available. These pillars are formed by projecting all points in the point cloud onto the x-y plane and binning them with a simple grid. The arithmetic mean of the x, y, z values of the points in each bin is taken, as well as the x and y offsets of each point from the center of the pillar (the mean values). These statistics are then combined with the three-dimensional coordinates of each point to give an eight-dimensional vector for each point. Once processed into point pillars, we normalize the data to be between 0 and 1 before sending it through the deep learning model. The labels also need to be processed to contain a 1 or 0 for each pillar so that they match in dimensions.
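As an illustration of the pillar encoding and the per-pillar labelling described above, the following Python sketch builds the eight-dimensional feature vectors from a point cloud; the grid size, the function name, and the omission of padding to a fixed number of points per pillar are simplifying assumptions of the example:
\begin{verbatim}
import numpy as np

def make_pillars(points, face_mask, grid=0.1):
    """points: (N, 3) x, y, z coordinates; face_mask: (N,) booleans.

    Returns {pillar index: (features, label)} where features has shape
    (points in pillar, 8):
    [x, y, z, mean_x, mean_y, mean_z, x - mean_x, y - mean_y]
    and label is 1 if the pillar contains at least one face point.
    """
    bins = np.floor(points[:, :2] / grid).astype(int)
    pillars = {}
    for key in {tuple(b) for b in bins}:
        in_bin = np.all(bins == key, axis=1)
        pts = points[in_bin]
        mean = pts.mean(axis=0)              # mean x, y, z of the pillar
        offsets = pts[:, :2] - mean[:2]      # x, y offsets from the pillar centre
        feats = np.hstack([pts, np.tile(mean, (len(pts), 1)), offsets])
        label = int(face_mask[in_bin].any()) # 1 if any face point falls in the pillar
        pillars[key] = (feats, label)
    return pillars
\end{verbatim}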
Something interesting that was highlighted when visualizing the point pillars was that about half of them had at least one noisy label away from the face, as shown in Figure~\ref{fig:pointPillars}, where blue marks a pillar with a face label, red marks a pillar that contains only non-face points, and green marks empty pillars. This highlights that mislabelled data fed into the model is accentuated.

\section{Architecture}
Once the data has been preprocessed, a vector of downsampled, clustered point pillars is ready to enter the deep learning network. As we are working with 1200 pillars, a maximum of 20 points per pillar, and 8 features, the incoming shape of the data is (number of samples, 1200, 20, 8). We struggled with our model, as it failed to run due to resource constraints in a Jupyter notebook with and without TPU/GPU, locally, and on several cloud instances. Finally, by reducing the batch size to 1, reducing the maximum points per pillar to 20 (which was closer to the observed maximum), and simplifying the model with fewer layers, we were able to run it locally. This resulted in the following model summary:

\begin{figure}[htp]
\centering
\includegraphics[scale=0.8]{model2.png}
\caption{Model Summary}
\label{fig:modelSummary}
\end{figure}

We compiled the model with an Adam optimizer with a learning rate of 0.0001 and a binary cross-entropy loss function. The per-pillar output then needs to be remapped to the point pillars that were fed in before delivering the result.

\section{Training}
We implement the traditional 80/20 train-test split in dividing the preprocessed data. The training and test data are processed by the point pillar method and the training data is passed to the model. The labels are per pillar: a pillar that includes at least one face point is labelled 1, and a pillar that includes no face points is labelled 0. Initially we were achieving a low accuracy of 0.2, but we were able to increase the accuracy by reducing the learning rate to 0.0001. We also added he{\_}normal initialization weights to the first Dense layer to try to help overcome the problem of vanishing gradients.

We analyzed which labels were being predicted incorrectly. For instance, for one face, 75{\%} of the missed labels were face pillars incorrectly labelled as no-face, and another face had 45{\%} of its face points labelled incorrectly. This tells us that although we were getting 73{\%} accuracy, much more work needs to be done to ensure that the face itself is labelled accurately.

\section{Conclusion}
While there are far too many points in a point cloud to process all points in real time, we developed a method to smartly eliminate points through RANSAC, cluster elimination, and downsampling. We developed a simple but efficient model that can achieve an accuracy of 0.73 in labeling each point as a face in the incoming point cloud. Significant speedup is necessary, but possible, for this system to be able to run in real time. We also have future ideas for Kalman filter tracking on the face cluster to predict the location of the face in the next frame, as a face can only move a certain distance within a given time frame. If the Kalman filter has an accurate enough prediction, we will be able to eliminate all points outside of a bounding box barely larger than any head, significantly reducing the number of unnecessary points prior to running the data through the model.
\begin{thebibliography}{9}
\bibitem{pointpillars}
A.~H. Lang, S.~Vora, H.~Caesar, L.~Zhou, J.~Yang and O.~Beijbom,
\textit{PointPillars: Fast Encoders for Object Detection from Point Clouds}, Dec 14, 2018.
\\\texttt{arXiv:1812.05784}

\bibitem{githubrepo}
\textit{Point Pillars in TensorFlow} (2020), GitHub,
\\\texttt{https://github.com/tyagi-iiitv/PointPillars}
\end{thebibliography}

\end{document}
\section{Asymptotic Theory}
Asymptotic theory, large sample theory, or limit theory, deals with the question of the limiting behavior of sequences of random variables. The notion of convergence for random variables is more involved than is usual for, for example, a sequence of numbers or functions in calculus. There are different types of convergence, and they do not necessarily imply one another. The chain of implication is:
\begin{equation}
\mathrm{quadratic\ mean} \rightarrow \mathrm{probability} \rightarrow \mathrm{distribution}
\end{equation}
For point mass distributions only, convergence in distribution implies convergence in probability. Similarly, implication rules exist for functions of random variables when convergence of the underlying random variables is given. See Theorems 5.4, 5.5 and 5.17 in \citeasnoun{wasserman2013all} (pages 73-74, 81).

\subsection{Preasymptotics}
Preasymptotics concerns the behavior in the intermediate regime, for example where $n$ is large but not yet asymptotic. Asymptotic behavior kicks in at very different rates for different underlying processes. The ``baseline" in terms of speed is represented by normally distributed random variables.

%convergence in probability
\subsection{Convergence in Probability}
\begin{equation}
\mathbb{P}(|X_n - X| > \epsilon) \rightarrow 0
\end{equation}
as $n\rightarrow \infty$. Convergence in probability means that the distribution of $X_n$ becomes sharper and sharper around $X$ as $n\rightarrow \infty$. At $n=\infty$, it has a point mass distribution concentrated at $X$.

% convergence in distribution
\subsection{Convergence in Distribution}
Where $F_n$ is the CDF of $X_n$ and $F$ is the CDF of $X$,
\begin{equation}
\lim_{n\rightarrow \infty} F_n(t) = F(t)
\end{equation}
for all $t$ at which $F$ is continuous. That means convergence still holds even when the equality is violated at points of discontinuity.

% convergence in quadratic mean
\subsection{Convergence in Quadratic Mean, Convergence in $L_2$}
\begin{equation}
\mathbb{E}(X_n - X)^2 \rightarrow 0
\end{equation}
as $n\rightarrow \infty$.

% almost sure convergence
\subsection{Almost Sure Convergence}
$X_n$ converges \textit{almost surely} to $X$ if:
\begin{equation}
\mathbb{P}(\{\omega: X_n(\omega) \rightarrow X(\omega) \}) = 1
\end{equation}
I would read this as the value of the measurable map $X_n$ converging to the value of the measurable map $X$ everywhere on the probability space except possibly on a set of probability measure $0$.

% l1 convergence
\subsection{$L_1$ Convergence}
$L_1$ convergence requires $\mathbb{E}|X_n - X| \rightarrow 0$ as $n\rightarrow \infty$.

% WLLN
\subsection{Weak Law of Large Numbers}
The sample mean of i.i.d. variables $\overline{X}_n$ converges in probability to the mean of the distribution, $\mu$. It can be proven with Chebyshev's inequality that:
\begin{equation}
\mathbb{P}(|\overline{X}_n-\mu|>\epsilon) \leq \frac{\mathbb{V}(\overline{X}_n)}{\epsilon^2}=\frac{\sigma^2}{n \epsilon^2}
\end{equation}
In words, the probability that the sample mean deviates from the population mean by more than $\epsilon$ has an upper bound that decreases in inverse proportion to $n\epsilon^2$. The distribution of the sample mean becomes more and more concentrated around the mean $\mu$.

% SLLN
\subsection{Strong Law of Large Numbers}
The strong law of large numbers gives almost sure convergence of the sample mean to the population mean. If $\mathbb{E}|X_1| < \infty$, then $\overline{X_n}\xrightarrow{as}\mu$.
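Returning to the Chebyshev bound in the weak law above, a small worked illustration (an added example, not from \citeasnoun{wasserman2013all}): for i.i.d. Bernoulli($p$) variables, $\sigma^2 = p(1-p) \leq 1/4$, so
\begin{equation}
\mathbb{P}(|\overline{X}_n - p| > \epsilon) \leq \frac{p(1-p)}{n\epsilon^2} \leq \frac{1}{4 n \epsilon^2},
\end{equation}
and with $n = 10^4$ and $\epsilon = 0.05$ the probability of deviating by more than $0.05$ is at most $\frac{1}{4 \cdot 10^4 \cdot 0.0025} = 0.01$.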
% CLT
\subsection{Central Limit Theorem}
The distribution of the sample mean converges in distribution to a normal distribution with variance $\sigma^2/n$ and mean $\mu$. If $Z_n = \frac{\overline{X}_n - \mu}{\sqrt{\mathbb{V}(\overline{X}_n)}}$ then $\lim_{n\rightarrow \infty} \mathbb{P}(Z_n \leq z) = \Phi(z)$, where $\Phi(z)$ is the CDF of a standard normal distribution.

It turns out that when $Z_n$ is obtained by normalizing not with the (most likely unknown) population variance $\sigma^2$ but with the sample variance $S_n^2$, the CLT still holds. The accuracy of the normal approximation is given by the Berry-Ess\'een Inequality.

% multivariate CLT
\subsection{Multivariate Central Limit Theorem}
Given $\mathbf{X_1, ... ,X_n}$ i.i.d. random vectors, where each vector:
\begin{equation}
\mathbf{X_i} = \left(\begin{array}{c}X_{1i}\\ X_{2i} \\ \vdots \\ X_{ki} \end{array}\right)
\end{equation}
Then the population mean:
\begin{equation}
\mathbf{\mu} = \left(\begin{array}{c} \mu_1 \\ \mu_2 \\ \vdots \\ \mu_k \end{array} \right) = \left(\begin{array}{c} \mathbb{E}(X_{1i}) \\ \mathbb{E}(X_{2i}) \\ \vdots \\ \mathbb{E}(X_{ki}) \end{array} \right)
\end{equation}
The variance is given by the matrix $\Sigma$ as before. The sample mean:
\begin{equation}
\overline{\mathbf{X}} = \left(\begin{array}{c}\overline{X_{1}}\\ \overline{X_{2}} \\ \vdots \\ \overline{X_{k}} \end{array}\right)
\end{equation}
Then $\sqrt{n}\,\Sigma^{-\frac{1}{2}} (\overline{\mathbf{X}}-\mathbf{\mu})$ converges in distribution to $\mathscr{N}(0,I)$.

% proof
\subsection{Proof of the Central Limit Theorem}
\citeasnoun{wasserman2013all}, page 81. Given i.i.d. random variables $X_i$, the transformation $Y_i = \frac{X_i-\mu}{\sigma}$ gives i.i.d. random variables with zero mean and unit variance. Let $\psi(t)$ be the MGF of $Y_i$. Since the $Y_i$ are i.i.d., the sum $\sum_{i=1}^n Y_i$ has MGF $\psi(t)^n$. The normalized sample mean $Z_n = \frac{1}{\sqrt{n}}\sum_{i=1}^n Y_i$ has MGF $\xi_n(t)=\psi(t/\sqrt{n})^n$.

Two random variables that have the same MGF in an open interval about the point $t=0$ have the same distribution, probably because the Laplace transform is injective. Therefore, if $\xi_n(t)\rightarrow \xi(t)$ in some open interval around $t=0$, then the underlying random variables converge in distribution, $Z_n \xrightarrow{dist} Z$. Taking the Taylor expansion of $\xi_n(t)$:
\begin{equation}
\xi_n(t) = \left(1+0+\frac{t^2}{2! n} + ... \right)^n \rightarrow e^{t^2/2}
\end{equation}
which is the MGF of $\mathscr{N}(0,1)$.

% Delta Method
\subsection{Delta Method}
The delta method allows statements regarding the convergence of functions of random variables, whenever the input random variable converges in distribution to a normal distribution. If $Y_n$ has a limiting normal distribution, and $g$ is a smooth function with $g'(\mu) \neq 0$, then if:
\begin{equation}
\frac{\sqrt{n}(Y_n - \mu)}{\sigma} \xrightarrow{dist} \mathscr{N}(0,1)
\end{equation}
Then:
\begin{equation}
\frac{\sqrt{n}(g(Y_n)-g(\mu))}{|g'(\mu)|\sigma}\xrightarrow{dist}\mathscr{N}(0,1)
\end{equation}
Rewriting, if $Y_n \xrightarrow{dist}\mathscr{N}(\mu,\frac{\sigma^2}{n})$ then $g(Y_n)\xrightarrow{dist}\mathscr{N}(g(\mu),(g'(\mu))^2\frac{\sigma^2}{n})$.

\subsubsection{Multivariate Delta Method}
If $\mathbf{Y_n} \xrightarrow{dist}\mathscr{N}\big(\mathbf{\mu},\frac{1}{n}\mathbf{\Sigma}\big)$ then the scalar-valued function $g(\mathbf{Y_n}) \xrightarrow{dist}\mathscr{N}\big(g(\mathbf{\mu}),\frac{1}{n} (\nabla g(\mathbf{\mu}))^T \mathbf{\Sigma}\, \nabla g(\mathbf{\mu})\big)$.
A typical use case would be functions of several sample means, where the underlying samples have non-trivial covariance (cf. \cite{wasserman2013all}, p. 80).
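As a concrete worked example of the univariate delta method (added here for illustration, not taken from the source): let $Y_n = \overline{X}_n$ be the sample mean of i.i.d. variables with mean $\mu \neq 0$ and variance $\sigma^2$, and take $g(y) = y^2$, so that $g'(\mu) = 2\mu \neq 0$. Then
\begin{equation}
\overline{X}_n^{\,2} \xrightarrow{dist} \mathscr{N}\Big(\mu^2, \; 4\mu^2 \frac{\sigma^2}{n}\Big).
\end{equation}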
\subsection{Countermeasures} \begin{frame}[t] \begin{center}\Large Algebraic Security (1/3)\end{center} \begin{columns} \setlength{\leftmargini}{0.25cm} \setlength{\leftmarginii}{0.5cm} \column{0.1\linewidth}{} \column{0.35\linewidth}{ {\large Security Model:} \vspace{0.25cm} \begin{enumerate} \item<1-> \alert{random} bits allowed \\ \begin{itemize} \item as in classic masking \item model~\alert{unpredictability} \item in WB impl. as {\bf pseudorandom} \end{itemize} \item<2-> {\bf Goal:} \\ any $f \in span\{v_i\}$ is \alert{unpredictable} \item<3-> {\bf isolated} from obfuscation problems \end{enumerate} } \column{0.5\linewidth}{ \includegraphics[height=7cm]{MaskedCircuitWB.png} } \end{columns} \end{frame} \newcommand\bias{\varepsilon} \newcommand\adv{\mathcal{A}} \newcommand\Advan{\mathsf{Adv}} \begin{frame}[t] \begin{center}\Large Algebraic Security (2/3)\end{center} \begin{columns} \setlength{\leftmargini}{0.25cm} \setlength{\leftmarginii}{0.5cm} \column{0.1\linewidth}{} \column{0.35\linewidth}{ {\large Adversary:} \vspace{0.25cm} \begin{enumerate} \item<1-> chooses plaintext/key pairs \item<2-> chooses $f \in span\{v_i\}$ \item<3-> tries to {\bf predict} values of this function \\ (i.e. before random bits are sampled) %\item<4-> succeeds,\\ % if \alert{only} $f$ matches \end{enumerate} } \column{0.5\linewidth}{ \includegraphics[height=7cm]{MaskedCircuitWB.png} } \end{columns} \end{frame} \begin{frame} \begin{center}\Large Algebraic Security (3/3)\end{center} \CenterBlock{10cm}{ \begin{block}{Proposition} Let ${\color{blue}F} = \{f(x,\cdot,\cdot) ~\mid~ f(x,r_e,r_c) \in span\{v_i\},~ x \in \mathbb{F}_2^N\}.$ Let ${\color{red}e} = -\log_2{\big(1/2 + \max_{f \in {\color{blue}F}}{bias(f)}\big)}$. Then for any adversary $\adv$ choosing $Q$ inputs $$ \Advan[\adv] \le min(2^{Q-|r_c|}, 2^{-{\color{red}e}Q}). $$ \end{block} } \pause \CenterBlock{10cm}{ \begin{block}{Corollary} Let ${\color{green!40!black}k}$ be a positive integer. Then for any adversary $\adv$ $$ \Advan[\adv] \le 2^{-{\color{green!40!black}k}} \text{~if~} e > 0 \text{~and~} |r_c| \ge k\cdot(1+\frac{1}{{\color{red}e}}). $$ \end{block} } \pause \center{\bf Information-theoretic security!} \end{frame} \begin{frame}[t] \Title{Minimalist Quadratic Masking Scheme} \begin{columns} \setlength{\leftmargini}{0.5cm} \column{0.1\linewidth}{} \column{0.3\textwidth}{ \only<1>{ {\Large Masking scheme} \vspace{0.25cm} \begin{itemize} \item \alert{quadratic} decoder:\\ $(a,b,c) \mapsto ab\oplus c$ \item set of {\bf gadgets} \\ \item provably secure {\bf composition} \end{itemize} } \only<2->{ {\Large Security} \vspace{0.25cm} \begin{enumerate} \item algorithm to verify \\ that bias $\ne 1/2$ \item max. degree on $r$: 4 \end{enumerate} \onslide<3->{ \vspace{0.3cm} ~~~~$\Rightarrow$ bias $\le 7/16$ \vspace{0.3cm} ~~~~for 80-bit security \\ ~~~~we need $|r_c| \ge 940$ } } } \column{0.5\textwidth}{ \input{figures/mqms.tex} } \end{columns} \end{frame} \begin{frame} \Title{Proof-of-concept masked AES-128} \CenterBlock{10cm}{ \vspace{0.5cm} \begin{enumerate} \item MQMS + 1-st order Boolean masking \item 31,783 $\to$ 2,588,743 gates expansion (x81) \item 16 Mb code / 1 Kb RAM / 0.05s per block on a laptop \item (unoptimized) \end{enumerate} } \vspace{0.5cm} \center{\Large \href{https://github.com/cryptolu/whitebox}{github.com/cryptolu/whitebox} } \end{frame}
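% ------------------------------------------------------------------
% Added illustration (not part of the original deck): the arithmetic
% behind the "940 random bits for 80-bit security" figure quoted above,
% assuming the maximum bias is 7/16.
\begin{frame}
\Title{Parameter check (added example)}
\begin{center}
$e = -\log_2\left(\tfrac{1}{2} + \tfrac{7}{16}\right) = \log_2\tfrac{16}{15} \approx 0.093$

\vspace{0.3cm}
$k\left(1 + \tfrac{1}{e}\right) \approx 80 \cdot (1 + 10.74) \approx 939.2
 \quad\Rightarrow\quad |r_c| \ge 940 \text{ for } k = 80$
\end{center}
\end{frame}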
\label{StateOfTheArt}
\chapter{State of the art}
In this chapter we provide an introduction to what agent-based models are and how they are used in the context of social studies. We give an overview of existing simulation platforms and of software developed specifically to simulate a virtual classroom. We discuss the deficiencies of those systems, and why we decided to develop our own.

\section{Social Simulations and Agent-based models}
Agent-based models (\textbf{ABM})\cite{Jackson2017} have been used in various fields to study complex systems that result from the interaction of many individual agents. Examples of such systems are the stock market, crowds, beehives, social networks and many more. Social simulations are a type of ABM that focus on modeling the social dynamics of humans or animals \cite{Helbing2012}. Early examples of such social simulations are Schelling's work \cite{Schelling1971} studying the dynamics of social segregation and, more recently, studies of the spread of contagious diseases within a city \cite{Perez2009}.
\bb
The intention of many of those simulations is to find emergent social phenomena that are present and empirically observed in the complete system (i.e. the group), but absent in the individual agents\cite{Jackson2017}. It is hence the search for emergent properties, often observed but not understood, that can be explained by the interaction of simple mechanisms of the individual agents.
\bb
By simulating such multi-agent systems one can construct, monitor and manipulate the system with perfect granularity and at very little cost, making such simulations an excellent tool to study complex dynamic systems.

\section{Agent-based model Software}
A range of open source as well as commercially distributed agent software\cite{Kravi2015} exists. Some of the more popular packages are NetLogo\cite{Tissue2004}, Swarm\cite{Minar1996} and Mesa\cite{Masad2015}, which provide frameworks to develop multi-agent simulations, often including visualization and a GUI.
\bb
We decided against using those existing frameworks, and instead developed our own solution based on the Unity3d\footnote{Developed by Unity Technologies and available from https://unity.com} game and simulation engine. In particular, Unity3d provides us with the following features that are absent or underdeveloped in other frameworks.
\begin{itemize}
\item \textbf{State of the Art Visualization:} Unity3D is used to develop AAA computer games and makes it possible to build simulations with realistic-looking visuals and even virtual reality environments.
\item \textbf{User Interaction:} User interaction, if present at all, is implemented very poorly in most simulation frameworks. As user interaction is an essential part of every computer game, Unity3d provides excellent support for it.
\item \textbf{Integration with other Machine Learning tools:} In the last year Unity3d has been extending its capabilities as an agent-based modeling framework by including a Machine Learning Agents toolkit that provides an easy interface between state-of-the-art machine learning tools like Tensorflow or Pytorch and the Unity simulation.
\item \textbf{Actively Maintained:} Many simulation frameworks have been academic endeavors with a short lifespan, and on multiple occasions they ceased to be maintained and available after a short period of time. Relying on a commercially sustained framework like Unity3d ensures availability and eases future development of the project.
\end{itemize}
Although the current version of the simulation does not yet make full use of all those features, Unity3d has been chosen to serve as a platform for future development based on the results achieved during the thesis.

\section{Simulations of virtual Classrooms}
Of particular interest to us are simulation systems that focus on a virtual classroom. Several academic and commercial systems have been developed with different objectives.
\bb
Some of those solutions (e.g. TLE TeachLivE\cite{Dieker2014}\cite{Dieker2017} or simSchool \cite{Badiee2015}) focus on teacher education, providing a virtual classroom that new teachers can use to learn how to interact with a class and resolve issues. Others (e.g. Katana Sim:Classroom \cite{Blume2019}) are used as a simulation environment for academic research, focusing on psychological studies.
\bb
Evaluating the different simulations, we found that all of them lacked one or more of the following features, and we therefore decided to develop our own solution.
\begin{itemize}
\item \textbf{OpenSource:} The simulation should be open source and freely available for academic and commercial purposes, in order to support its adoption and the sustainability of the project.
\item \textbf{Agent Model:} The agent behavior should depend on a flexible agent logic that is based on empirical psychological studies.
\item \textbf{Flexibility:} It should be possible to configure the simulation in terms of class size, student profiles and classroom environment.
\item \textbf{Reproducibility:} The simulation outcome (apart from user interaction) should be reproducible, in order to provide a framework to study particular group dynamics. If multiple runs of the same simulation produce different results, it is unclear how alterations of the simulation configuration affect the outcome.
\item \textbf{Data Analysis:} The simulation should include methods and tools to study the results generated. In particular, it should be possible to execute multiple instances of the simulation with slightly changed conditions in order to perform a statistical analysis of the outcome.
\end{itemize}
\documentclass[12pt]{extarticle} \usepackage{amsmath} \usepackage{amsfonts} \usepackage{graphicx} \usepackage{nicefrac} \usepackage{subfigure} \usepackage{algorithm} \usepackage{paralist} \usepackage[geometry]{ifsym} \usepackage{rotating} \usepackage[normalem]{ulem} \usepackage{varwidth} \usepackage{lscape} % \usepackage{color} \usepackage{algpseudocode} % \bibliographystyle{amsplain} \usepackage[utf8]{inputenc} % Default fixed font does not support bold face \DeclareFixedFont{\ttb}{T1}{txtt}{bx}{n}{12} % for bold \DeclareFixedFont{\ttm}{T1}{txtt}{m}{n}{12} % for normal % Custom colors \usepackage{color} \definecolor{deepblue}{rgb}{0,0,0.5} \definecolor{deepred}{rgb}{0.6,0,0} \definecolor{deepgreen}{rgb}{0,0.5,0} \usepackage{listings} % Python style for highlighting \newcommand\pythonstyle{\lstset{ language=Python, basicstyle=\ttm, otherkeywords={self}, % Add keywords here keywordstyle=\ttb\color{deepblue}, emph={MyClass,__init__}, % Custom highlighting emphstyle=\ttb\color{deepred}, % Custom highlighting style stringstyle=\color{deepgreen}, frame=tb, % Any extra options here showstringspaces=false % }} % Python environment \lstnewenvironment{python}[1][] { \pythonstyle \lstset{#1} } {} % Python for external files \newcommand\pythonexternal[1]{{ \pythonstyle \lstinputlisting{#1}}} % Python for inline \newcommand\pythoninline[1]{{\pythonstyle\lstinline!#1!}} \addtolength{\oddsidemargin}{-.2in} \addtolength{\evensidemargin}{-.5in} \addtolength{\textwidth}{0.5in} \addtolength{\topmargin}{-.5in} \addtolength{\textheight}{0.35in} \sloppy % makes TeX less fussy about line breaking \pagestyle{plain} % use just a plain page number \numberwithin{equation}{section} % add the section number to the equation label \usepackage{fancyheadings} \newcommand{\com}[1]{\texttt{#1}} \newcommand{\DIV}{\ensuremath{\mathop{\mathbf{DIV}}}} \newcommand{\GRAD}{\ensuremath{\mathop{\mathbf{GRAD}}}} \newcommand{\CURL}{\ensuremath{\mathop{\mathbf{CURL}}}} \newcommand{\CURLt}{\ensuremath{\mathop{\overline{\mathbf{CURL}}}}} \newcommand{\nullspace}{\ensuremath{\mathop{\mathrm{null}}}} \newcommand{\FrameboxA}[2][]{#2} \newcommand{\Framebox}[1][]{\FrameboxA} \newcommand{\Fbox}[1]{#1} %\usepackage[round]{natbib} \newcommand{\half}{\mbox{\small \(\frac{1}{2}\)}} \newcommand{\hf}{{\frac 12}} \newcommand {\HH} { {\bf H} } \newcommand{\hH}{\widehat{H}} \newcommand{\hL}{\widehat{L}} \newcommand{\bmath}[1]{\mbox{\bf #1}} \newcommand{\hhat}[1]{\stackrel{\scriptstyle \wedge}{#1}} \newcommand{\R}{{\rm I\!R}} \newcommand {\D} {{\vec{D}}} \newcommand {\sg}{{\hsigma}} %\renewcommand{\vec}[1]{\ensuremath{\mathbf{#1}}} \newcommand{\E}{\vec{E}} \renewcommand{\H}{\vec{H}} \newcommand{\J}{\vec{J}} \newcommand{\dd}{d^{\rm obs}} % \newcommand{\F}{\vec{F}} \newcommand{\C}{\vec{C}} \newcommand{\s}{\vec{s}} \newcommand{\N}{\vec{N}} \newcommand{\M}{\vec{M}} \newcommand{\A}{\vec{A}} \newcommand{\B}{\vec{B}} \newcommand{\w}{\vec{w}} \newcommand{\nn}{\vec{n}} \newcommand{\cA}{{\cal A}} \newcommand{\cQ}{{\cal Q}} \newcommand{\cR}{{\cal R}} \newcommand{\cG}{{\cal G}} \newcommand{\cW}{{\cal W}} \newcommand{\hsig}{\hat \sigma} \newcommand{\hJ}{\hat \J} \newcommand{\hbeta}{\widehat \beta} \newcommand{\lam}{\lambda} \newcommand{\dt}{\delta t} \newcommand{\kp}{\kappa} \newcommand {\lag} { {\cal L}} \newcommand{\zero}{\vec{0}} \newcommand{\Hr}{H_{red}} \newcommand{\Mr}{M_{red}} \newcommand{\mr}{m_{ref}} \newcommand{\thet}{\ensuremath{\mbox{\boldmath $\theta$}}} \newcommand{\curl}{\ensuremath{\nabla \times\,}} \renewcommand{\div}{\nabla\cdot\,} 
\newcommand{\grad}{\ensuremath{\nabla}} \newcommand{\dm}{\delta m} \newcommand{\gradh}{\ensuremath{\nabla}_h} \newcommand{\divh}{\nabla_h\cdot\,} \newcommand{\curlh}{\ensuremath{\nabla_h\times\,}} \newcommand{\curlht}{\ensuremath{\nabla_h^T\times\,}} \newcommand{\Q}{\vec{Q}} \renewcommand{\J}{\vec J} \renewcommand{\J}{\vec J} \renewcommand{\u}{\vec u} \newcommand{\f}{\vec f} \newcommand{\n}{\vec n} \renewcommand{\v}{\vec v} \newcommand{\phiv}{\vec \phi} % \usepackage[authoryear,numbers,square,sort,comma,colon,]{natbib} % \usepackage[numbers/]{natbib} % \usepackage[square,sort,comma,numbers]{natbib} \renewcommand{\ne}{N\'ed\'elec elements } \newcommand{\me}{Maxwell's equations } \newcommand{\partialt}[1]{\frac{\partial #1}{\partial t}} \newcommand{\cref}[1]{(\ref{#1})} % \newcommand{\Ct}{\ensuremath{C^{\mbox{\tiny{T}}}} \newcommand{\Ct}{\ensuremath{C^{\mbox{\tiny{T}}}}} % \renewcommand{\baselinestretch}{1.40}\normalsize \usepackage{setspace} \usepackage{amsthm} \newtheorem{prop}{Proposition}[section] \DeclareMathAlphabet{\mathpzc}{OT1}{pzc}{m}{it} \DeclareFontFamily{OT1}{pzc}{} \DeclareFontShape{OT1}{pzc}{m}{it}{<-> s * [0.900] pzcmi7t}{} \DeclareMathAlphabet{\mathpzc}{OT1}{pzc}{m}{it} \onehalfspacing \begin{document} \pagestyle{fancyplain} \fancyhead{} \fancyfoot{} % clear all footer fields \fancyfoot[LE,RO]{\thepage \hspace{-5mm}} \fancyfoot[LO,CE]{ \footnotesize{ Michael Wathen 7830121}} \fancyfoot[CO,RE]{} \subsection*{$$\curl \curl \u +c\u = \f$$} \subsubsection*{Nedelec$_1$ - order 1} \begin{tabular}{lrrlrlr} \hline {} & Total DoF & Soln Time & B-L2 & L2-order & B-Curl & Curl-order \\ \hline 0 & 28 & 1.42e-04 & 7.0716e-01 & 0.00 & 6.2831e+00 & 0.00 \\ 1 & 104 & 1.76e-04 & 3.1843e-01 & 1.15 & 2.5051e+00 & 1.33 \\ 2 & 400 & 6.90e-04 & 1.6044e-01 & 0.99 & 1.1958e+00 & 1.07 \\ 3 & 1568 & 3.05e-03 & 8.0190e-02 & 1.00 & 5.8605e-01 & 1.03 \\ 4 & 6208 & 1.87e-02 & 4.0084e-02 & 1.00 & 2.9136e-01 & 1.01 \\ 5 & 24704 & 1.23e-01 & 2.0040e-02 & 1.00 & 1.4547e-01 & 1.00 \\ 6 & 98560 & 7.99e-01 & 1.0020e-02 & 1.00 & 7.2706e-02 & 1.00 \\ \hline \end{tabular} \subsubsection*{Nedelec$_1$ - order 2} \begin{tabular}{lrrlrlr} \hline {} & Total DoF & Soln Time & B-L2 & L2-order & B-Curl & Curl-order \\ \hline 0 & 88 & 2.77e-04 & 1.4222e-01 & 0.00 & 7.8349e-01 & 0.00 \\ 1 & 336 & 4.29e-04 & 4.7178e-02 & 1.59 & 4.4258e-01 & 0.82 \\ 2 & 1312 & 2.06e-03 & 1.1869e-02 & 1.99 & 1.1146e-01 & 1.99 \\ 3 & 5184 & 1.15e-02 & 2.9788e-03 & 1.99 & 2.7943e-02 & 2.00 \\ 4 & 20608 & 9.70e-02 & 7.4552e-04 & 2.00 & 6.9910e-03 & 2.00 \\ 5 & 82176 & 5.45e-01 & 1.8643e-04 & 2.00 & 1.7481e-03 & 2.00 \\ 6 & 328192 & 7.06e+00 & 4.6612e-05 & 2.00 & 4.3704e-04 & 2.00 \\ \hline \end{tabular} \subsubsection*{Nedelec$_2$ - order 1} \begin{tabular}{lrrlrlr} \hline {} & Total DoF & Soln Time & B-L2 & L2-order & B-Curl & Curl-order \\ \hline 0 & 56 & 1.69e-04 & 7.0710e-01 & 0.00 & 6.2831e+00 & 0.00 \\ 1 & 208 & 3.31e-04 & 2.1056e-01 & 1.75 & 2.4973e+00 & 1.33 \\ 2 & 800 & 1.55e-03 & 6.0010e-02 & 1.81 & 1.1945e+00 & 1.06 \\ 3 & 3136 & 9.14e-03 & 1.5493e-02 & 1.95 & 5.8587e-01 & 1.03 \\ 4 & 12416 & 6.97e-02 & 3.9043e-03 & 1.99 & 2.9134e-01 & 1.01 \\ 5 & 49408 & 5.01e-01 & 9.7802e-04 & 2.00 & 1.4546e-01 & 1.00 \\ 6 & 197120 & 7.05e+00 & 2.4463e-04 & 2.00 & 7.2705e-02 & 1.00 \\ \hline \end{tabular} \subsubsection*{Nedelec$_2$ - order 2} \begin{tabular}{lrrlrlr} \hline {} & Total DoF & Soln Time & B-L2 & L2-order & B-Curl & Curl-order \\ \hline 0 & 132 & 2.46e-04 & 3.0169e-02 & 0.00 & 7.8388e-01 & 0.00 \\ 1 & 504 & 1.14e-03 & 1.3531e-02 & 1.16 
& 4.3854e-01 & 0.84 \\ 2 & 1968 & 4.20e-03 & 1.6029e-03 & 3.08 & 1.1120e-01 & 1.98 \\ 3 & 7776 & 2.45e-02 & 1.9768e-04 & 3.02 & 2.7927e-02 & 1.99 \\ 4 & 30912 & 1.89e-01 & 2.4627e-05 & 3.00 & 6.9900e-03 & 2.00 \\ 5 & 123264 & 1.43e+00 & 3.0758e-06 & 3.00 & 1.7480e-03 & 2.00 \\ 6 & 492288 & 1.47e+01 & 3.8439e-07 & 3.00 & 4.3704e-04 & 2.00 \\ \hline \end{tabular} \newpage \section*{Maxwell saddle point - c = 0} \subsubsection*{(Nedelec$_1$,CG$_1$) - (1,1)} \begin{tabular}{lrrrlrlr} \hline {} & Total DoF & B DoF & Soln Time & B-L2 & L2-order & B-Curl & Curl-order \\ \hline 0 & 41 & 28 & 0.00 & 5.9415e-02 & 0.00 & 2.3570e-01 & 0 \\ 1 & 145 & 104 & 0.00 & 2.9524e-02 & 1.01 & 1.1785e-01 & 1 \\ 2 & 545 & 400 & 0.01 & 1.4739e-02 & 1.00 & 5.8926e-02 & 1 \\ 3 & 2113 & 1568 & 0.03 & 7.3667e-03 & 1.00 & 2.9463e-02 & 1 \\ 4 & 8321 & 6208 & 0.12 & 3.6830e-03 & 1.00 & 1.4731e-02 & 1 \\ 5 & 33025 & 24704 & 0.59 & 1.8414e-03 & 1.00 & 7.3657e-03 & 1 \\ 6 & 131585 & 98560 & 2.91 & 9.2071e-04 & 1.00 & 3.6828e-03 & 1 \\ \hline \end{tabular} \begin{tabular}{lrrrlrlr} \hline {} & Total DoF & R DoF & Soln Time & R-L2 & R-order & R-H1 & H1-order \\ \hline 0 & 41 & 13 & 0.00 & 1.0257e-02 & 0.00 & 6.9112e-02 & 0.00 \\ 1 & 145 & 41 & 0.00 & 2.5760e-03 & 1.99 & 3.3734e-02 & 1.03 \\ 2 & 545 & 145 & 0.01 & 6.4320e-04 & 2.00 & 1.6760e-02 & 1.01 \\ 3 & 2113 & 545 & 0.03 & 1.6072e-04 & 2.00 & 8.3667e-03 & 1.00 \\ 4 & 8321 & 2113 & 0.12 & 4.0176e-05 & 2.00 & 4.1817e-03 & 1.00 \\ 5 & 33025 & 8321 & 0.59 & 1.0044e-05 & 2.00 & 2.0906e-03 & 1.00 \\ 6 & 131585 & 33025 & 2.91 & 2.5109e-06 & 2.00 & 1.0453e-03 & 1.00 \\ \hline \end{tabular} \subsubsection*{(Nedelec$_1$,CG$_1$) - (2,2)} \begin{tabular}{lrrrlrlr} \hline {} & Total DoF & B DoF & Soln Time & B-L2 & L2-order & B-Curl & Curl-order \\ \hline 0 & 129 & 88 & 0.00 & 7.0820e-03 & 0 & 2.6736e-04 & 0.00 \\ 1 & 481 & 336 & 0.01 & 1.7705e-03 & 2 & 3.9471e-05 & 2.76 \\ 2 & 1857 & 1312 & 0.03 & 4.4262e-04 & 2 & 5.1382e-06 & 2.94 \\ 3 & 7297 & 5184 & 0.10 & 1.1066e-04 & 2 & 6.4885e-07 & 2.99 \\ 4 & 28929 & 20608 & 0.44 & 2.7664e-05 & 2 & 8.1314e-08 & 3.00 \\ 5 & 115201 & 82176 & 2.24 & 6.9160e-06 & 2 & 1.0171e-08 & 3.00 \\ 6 & 459777 & 328192 & 11.09 & 1.7290e-06 & 2 & 1.2739e-09 & 3.00 \\ \hline \end{tabular} \begin{tabular}{lrrrlrlr} \hline {} & Total DoF & R DoF & Soln Time & R-L2 & R-order & R-H1 & H1-order \\ \hline 0 & 129 & 41 & 0.00 & 4.2403e-04 & 0.00 & 1.2952e-02 & 0.00 \\ 1 & 481 & 145 & 0.01 & 5.3673e-05 & 2.98 & 3.1099e-03 & 2.06 \\ 2 & 1857 & 545 & 0.03 & 6.7369e-06 & 2.99 & 7.6935e-04 & 2.02 \\ 3 & 7297 & 2113 & 0.10 & 8.4321e-07 & 3.00 & 1.9183e-04 & 2.00 \\ 4 & 28929 & 8321 & 0.44 & 1.0544e-07 & 3.00 & 4.7926e-05 & 2.00 \\ 5 & 115201 & 33025 & 2.24 & 1.3148e-08 & 3.00 & 1.1980e-05 & 2.00 \\ 6 & 459777 & 131585 & 11.09 & 1.3649e-09 & 3.27 & 2.9948e-06 & 2.00 \\ \hline \end{tabular} \newpage \subsection*{MHD} \subsubsection*{No coupling - orders (2,1,2,2)} \begin{tabular}{lrrrrrrr} \hline {} & Total DoF & V DoF & Soln Time & V-L2 & L2-order & V-H1 & H1-order \\ \hline 0 & 224 & 82 & 0.00 & 1.05e-03 & 0.00 & 2.74e-02 & 0 \\ 1 & 812 & 290 & 0.01 & 1.30e-04 & 3.02 & 6.87e-03 & 2 \\ 2 & 3092 & 1090 & 0.06 & 1.61e-05 & 3.01 & 1.72e-03 & 2 \\ 3 & 12068 & 4226 & 0.29 & 2.01e-06 & 3.00 & 4.30e-04 & 2 \\ 4 & 47684 & 16642 & 1.44 & 2.36e-07 & 3.09 & 1.08e-04 & 2 \\ 5 & 189572 & 66050 & 7.84 & 7.83e-08 & 1.59 & 2.69e-05 & 2 \\ \hline \end{tabular} \begin{tabular}{lrrrrr} \hline {} & Total DoF & P DoF & Soln Time & P-L2 & L2-order \\ \hline 0 & 224 & 13 & 0.00 & 1.00e-02 & 0.00 
\\ 1 & 812 & 41 & 0.01 & 2.47e-03 & 2.02 \\ 2 & 3092 & 145 & 0.06 & 6.21e-04 & 1.99 \\ 3 & 12068 & 545 & 0.29 & 1.56e-04 & 2.00 \\ 4 & 47684 & 2113 & 1.44 & 3.90e-05 & 2.00 \\ 5 & 189572 & 8321 & 7.84 & 9.74e-06 & 2.00 \\ \hline \end{tabular} \begin{tabular}{lrrrrrrr} \hline {} & Total DoF & B DoF & Soln Time & B-L2 & L2-order & B-Curl & Curl-order \\ \hline 0 & 224 & 88 & 0.00 & 6.86e-03 & 0.00 & 3.31e-02 & 0 \\ 1 & 812 & 336 & 0.01 & 1.76e-03 & 1.97 & 8.27e-03 & 2 \\ 2 & 3092 & 1312 & 0.06 & 4.42e-04 & 1.99 & 2.07e-03 & 2 \\ 3 & 12068 & 5184 & 0.29 & 1.11e-04 & 2.00 & 5.17e-04 & 2 \\ 4 & 47684 & 20608 & 1.44 & 2.77e-05 & 2.00 & 1.29e-04 & 2 \\ 5 & 189572 & 82176 & 7.84 & 6.92e-06 & 2.00 & 3.23e-05 & 2 \\ \hline \end{tabular} \begin{tabular}{lrrrrrrr} \hline {} & Total DoF & R DoF & Soln Time & R-L2 & R-order & R-H1 & H1-order \\ \hline 0 & 224 & 41 & 0.00 & 5.22e-04 & 0.00 & 1.29e-02 & 0.00 \\ 1 & 812 & 145 & 0.01 & 6.48e-05 & 3.01 & 3.10e-03 & 2.05 \\ 2 & 3092 & 545 & 0.06 & 8.10e-06 & 3.00 & 7.69e-04 & 2.01 \\ 3 & 12068 & 2113 & 0.29 & 1.01e-06 & 3.00 & 1.92e-04 & 2.00 \\ 4 & 47684 & 8321 & 1.44 & 1.27e-07 & 3.00 & 4.79e-05 & 2.00 \\ 5 & 189572 & 33025 & 7.84 & 1.58e-08 & 3.00 & 1.20e-05 & 2.00 \\ \hline \end{tabular} \subsubsection*{Coupled - orders (2,1,2,2)} \begin{tabular}{lrrrrrrr} \hline {} & Total DoF & V DoF & Soln Time & V-L2 & L2-order & V-H1 & H1-order \\ \hline 0 & 224 & 82 & 0.00 & 1.05e-03 & 0.00 & 2.74e-02 & 0 \\ 1 & 812 & 290 & 0.02 & 1.30e-04 & 3.02 & 6.87e-03 & 2 \\ 2 & 3092 & 1090 & 0.11 & 1.61e-05 & 3.01 & 1.72e-03 & 2 \\ 3 & 12068 & 4226 & 0.54 & 2.01e-06 & 3.00 & 4.30e-04 & 2 \\ 4 & 47684 & 16642 & 2.90 & 2.36e-07 & 3.09 & 1.08e-04 & 2 \\ 5 & 189572 & 66050 & 17.11 & 7.86e-08 & 1.59 & 2.69e-05 & 2 \\ \hline \end{tabular} \begin{tabular}{lrrrrr} \hline {} & Total DoF & P DoF & Soln Time & P-L2 & L2-order \\ \hline 0 & 224 & 13 & 0.00 & 1.00e-02 & 0.00 \\ 1 & 812 & 41 & 0.02 & 2.47e-03 & 2.02 \\ 2 & 3092 & 145 & 0.11 & 6.21e-04 & 1.99 \\ 3 & 12068 & 545 & 0.54 & 1.56e-04 & 2.00 \\ 4 & 47684 & 2113 & 2.90 & 3.90e-05 & 2.00 \\ 5 & 189572 & 8321 & 17.11 & 9.74e-06 & 2.00 \\ \hline \end{tabular} \begin{tabular}{lrrrrrrr} \hline {} & Total DoF & B DoF & Soln Time & B-L2 & L2-order & B-Curl & Curl-order \\ \hline 0 & 224 & 88 & 0.00 & 6.86e-03 & 0.00 & 3.31e-02 & 0 \\ 1 & 812 & 336 & 0.02 & 1.76e-03 & 1.97 & 8.27e-03 & 2 \\ 2 & 3092 & 1312 & 0.11 & 4.42e-04 & 1.99 & 2.07e-03 & 2 \\ 3 & 12068 & 5184 & 0.54 & 1.11e-04 & 2.00 & 5.17e-04 & 2 \\ 4 & 47684 & 20608 & 2.90 & 2.77e-05 & 2.00 & 1.29e-04 & 2 \\ 5 & 189572 & 82176 & 17.11 & 6.92e-06 & 2.00 & 3.23e-05 & 2 \\ \hline \end{tabular} \begin{tabular}{lrrrrrrr} \hline {} & Total DoF & R DoF & Soln Time & R-L2 & R-order & R-H1 & H1-order \\ \hline 0 & 224 & 41 & 0.00 & 5.06e-04 & 0.00 & 1.31e-02 & 0.00 \\ 1 & 812 & 145 & 0.02 & 6.44e-05 & 2.97 & 3.12e-03 & 2.07 \\ 2 & 3092 & 545 & 0.11 & 8.09e-06 & 2.99 & 7.70e-04 & 2.02 \\ 3 & 12068 & 2113 & 0.54 & 1.01e-06 & 3.00 & 1.92e-04 & 2.00 \\ 4 & 47684 & 8321 & 2.90 & 1.27e-07 & 3.00 & 4.79e-05 & 2.00 \\ 5 & 189572 & 33025 & 17.11 & 1.58e-08 & 3.00 & 1.20e-05 & 2.00 \\ \hline \end{tabular} \end{document}
{ "alphanum_fraction": 0.4928223989, "avg_line_length": 38.4885844749, "ext": "tex", "hexsha": "5707b925c784e73390eb649441d87a301e7c6ebc", "lang": "TeX", "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2020-01-13T13:59:44.000Z", "max_forks_repo_forks_event_min_datetime": "2019-10-28T16:12:13.000Z", "max_forks_repo_head_hexsha": "35524f40028541a4d611d8c78574e4cf9ddc3278", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "wathen/PhD", "max_forks_repo_path": "MHD/LaTeX/MaxwellConvergence/Convergence.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "35524f40028541a4d611d8c78574e4cf9ddc3278", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "wathen/PhD", "max_issues_repo_path": "MHD/LaTeX/MaxwellConvergence/Convergence.tex", "max_line_length": 95, "max_stars_count": 3, "max_stars_repo_head_hexsha": "35524f40028541a4d611d8c78574e4cf9ddc3278", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "wathen/PhD", "max_stars_repo_path": "MHD/LaTeX/MaxwellConvergence/Convergence.tex", "max_stars_repo_stars_event_max_datetime": "2021-08-10T21:27:30.000Z", "max_stars_repo_stars_event_min_datetime": "2020-10-25T13:30:20.000Z", "num_tokens": 7801, "size": 16858 }
% ========================================= % COMMAND: _CALL % ========================================= \newpage \section{\_CALL} \label{cmd:_CALL} \paragraph{Syntax:} \subparagraph{} \texttt{\_CALL <name of block>} \paragraph{Purpose:} \subparagraph{} Invokes the given \texttt{BLOCK}. See \autoref{chap:block} on page~\pageref{chap:block} for examples. \paragraph{Namespaces:} \subparagraph{} Namespace prefixes can be added with the local command \texttt{\_USE} (see \autoref{cmd:USE} on page~\pageref{cmd:USE} for details).
{ "alphanum_fraction": 0.6175373134, "avg_line_length": 23.3043478261, "ext": "tex", "hexsha": "505ad274514ec2574458f4fbeb1d75f20c66a0f6", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "5bbe912ffde2e74b382405f580ef5963bf792288", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "ia97lies/httest", "max_forks_repo_path": "doc/users-guide/local-commands/cmd_call.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "5bbe912ffde2e74b382405f580ef5963bf792288", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "ia97lies/httest", "max_issues_repo_path": "doc/users-guide/local-commands/cmd_call.tex", "max_line_length": 59, "max_stars_count": 4, "max_stars_repo_head_hexsha": "5bbe912ffde2e74b382405f580ef5963bf792288", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "ia97lies/httest", "max_stars_repo_path": "doc/users-guide/local-commands/cmd_call.tex", "max_stars_repo_stars_event_max_datetime": "2021-06-01T13:22:10.000Z", "max_stars_repo_stars_event_min_datetime": "2019-05-16T07:47:43.000Z", "num_tokens": 147, "size": 536 }
\setlength{\parindent}{1em}

\chapter{Acknowledgments}

Thanks first of all go to the authors of the textbooks here stitched together: P.D. Magnus for \emph{For All X} and Cathal Woods for \emph{Introduction to Reasoning}. My thanks go to them for writing the excellent textbooks that have been incorporated into this one, for making those publicly available under Creative Commons licenses, and for giving their blessing to this derivative work.

In general, this book would not be possible without a culture of sharing knowledge. The book was typeset using \LaTeX$2\varepsilon$ developed by Leslie Lamport. Lamport was building on \TeX{} by Donald Knuth. Peter Selinger built on what Lamport made by developing the Fitch typesetting format that the proofs were laid out in. Diagrams were made in TikZ by Till Tantau. All of these systems are not only freely available online; they also have extensive user support communities. Add-on packages are designed, manuals are written, and questions are answered in discussion forums, all by people who are donating their time and expertise. The culture of sharing isn't just responsible for the typesetting of this book; it was essential to the content. Essential background information comes from the free online \textit{Stanford Encyclopedia of Philosophy}. Primary sources from the history of logic came from \textit{Project Gutenberg}. Logicians, too, can and should create free knowledge.

Many early adopters of this text provided invaluable feedback, including Jeremy Dolan, Terry Winant, Benjamin Lennertz, Ben Sheredos, and Michael Hartsock. Lennertz, in particular, provided useful edits. Helpful comments were also made by Ben Cordry, John Emerson, Andrew Mills, Nathan Smith, Vera Tobin, Cathal Woods, and many more that I have forgotten to mention, but whose emails are probably sitting on my computer somewhere. I would also like to thank Lorain County Community College for providing the sabbatical leave that allowed me to write the sections of this book on Aristotelian logic. Special thanks go to all the students at LCCC who had to suffer through earlier versions of this work and provided much helpful feedback. Most importantly, I would like to thank Molly, Caroline and Joey for their incredible love and support.

\begin{adjustwidth}{2em}{0em}
J. Robert Loftis \\
\noindent \emph{Elyria, Ohio, USA}
\end{adjustwidth}

\pagebreak
\thispagestyle{empty}

\noindent Intellectual debts too great to articulate are owed to scholars too many to enumerate. At different points in the work, readers might detect the influence of various works of Aristotle, Toulmin (especially \cite*{Toulmin1958}), Fisher and Scriven \parencite*{Fisher1997}, Walton (especially \cite*{Walton1996}), Epstein \parencite*{Epstein2002}, Johnson-Laird (especially \cite*{johnson2006we}), Scriven \parencite*{Scriven1962}, Giere \parencite*{giere1997understanding}, and the works of the Amsterdam school of pragma-dialectics \citep{van2002argumentation}.

Thanks are due to Virginia Wesleyan College for providing me with Summer Faculty Development funding in 2008 and 2010 and a Batten professorship in 2011. These funds, along with some undergraduate research funds (also provided by VWC), allowed me to hire students Gaby Alexander (2008), Ksera Dyette (2009), Mark Jones (2008), Andrea Medrano (2011), Lauren Perry (2009), and Alan Robertson (2010). My thanks to all of them for their hard work and enthusiasm.
For feedback on the text, thanks are due to James Robert (Rob) Loftis (Lorain County Community College) and Bill Roche (Texas Christian University). Answers (to exercises) marked with ``(JRL)'' are by James Robert Loftis. Particular thanks are due to my former Ohio State colleague Bill Roche. The book began as a collection of lecture notes, combining work by myself and Bill.

\begin{adjustwidth}{2em}{0em}
Cathal Woods\\
\noindent\emph{Norfolk, Virginia, USA}\\
\noindent(Taken from \emph{Introduction to Reasoning} (\citeyear{Woods2014}))
\end{adjustwidth}

\vspace{3cm}

\noindent The author would like to thank the people who made this project possible. Notable among these are Cristyn Magnus, who read many early drafts; Aaron Schiller, who was an early adopter and provided considerable, helpful feedback; and Bin Kang, Craig Erb, Nathan Carter, Wes McMichael, and the students of Introduction to Logic, who detected various errors in previous versions of the book.

\begin{adjustwidth}{2em}{0em}
P.D. Magnus \\
\noindent\emph{Albany, New York, USA}\\
\noindent(Taken from \emph{For All X} (\citeyear{Magnus2008}))
\end{adjustwidth}
{ "alphanum_fraction": 0.7869593286, "avg_line_length": 91.1176470588, "ext": "tex", "hexsha": "ffc6cde6e48e00e3c76d6870c75c2979fa99a6b3", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "042c4e6e993d235cf9f2b04879d2171e517acc54", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "robinson-philo/openintroduction", "max_forks_repo_path": "tex/04-acknowledgements.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "042c4e6e993d235cf9f2b04879d2171e517acc54", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "robinson-philo/openintroduction", "max_issues_repo_path": "tex/04-acknowledgements.tex", "max_line_length": 638, "max_stars_count": null, "max_stars_repo_head_hexsha": "042c4e6e993d235cf9f2b04879d2171e517acc54", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "robinson-philo/openintroduction", "max_stars_repo_path": "tex/04-acknowledgements.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1106, "size": 4647 }
%!TEX root = soutenance_lei_2018.tex

\subsection{Panoramic view of the heat flux inside the vehicle}
\frame{\tableofcontents[currentsection,currentsubsection]}

\begin{frame}{Outline}
Since a truck is large, the homography transformation would introduce large deviations in the final thermal images.\\
\pause
==> Take photos inside the truck?\\
\pause
==> \alert{Panoramic view!}
\end{frame}

\begin{frame}{Experimental Set-up}
\begin{figure}[ht]
\centering
\includegraphics[scale=0.3]{img/ch3/Camera_setup.jpg}
\caption{Pan-tilt device for the experimental test}
\end{figure}
\end{frame}

\begin{frame}{Image processing--\small{spherical projection}}
\begin{figure}
% \centering
\hspace*{-15pt}
\includegraphics[scale=0.238]{img/ch3/Sph_1.jpg}
\includegraphics[scale=0.238]{img/ch3/Sph_2.jpg}
\caption{Spherical projection.}
\label{Sph_pro}
\end{figure}
\end{frame}

\begin{frame}{Image processing--\small{spherical projection}}
Therefore, given the focal length $ f $ and the image coordinates $ (x, y) $, the corresponding spherical coordinates $ (x', y') $ are:
\begin{align*}
x' &= f \cdot \tan\left(\dfrac{x-x_c}{f}\right)+x_c \\
y' &= f \cdot \frac{\tan\left(\dfrac{y-y_c}{f}\right)}{\cos\left(\dfrac{x-x_c}{f}\right)} +y_c
\end{align*}
where $ (x_c,y_c) $ are the center coordinates of the spherical image.
\end{frame}

\begin{frame}{Image processing--\small{spherical projection}}
The original image and its spherical projection are then:
\begin{figure}
% \centering
\hspace*{-15pt}
\includegraphics[scale=0.336]{img/ch3/img7.jpg}
\includegraphics[scale=0.336]{img/ch3/sph_img7.jpg}
\caption{Original IR image (left) and its spherical projection (right).}
\label{Orig_sph}
\end{figure}
\end{frame}

\begin{frame}{Image processing--\small{Harris corner}}
\begin{figure}
% \centering
\hspace*{-15pt}
\includegraphics[scale=0.50]{img/ch3/Harris1.png}
\includegraphics[scale=0.50]{img/ch3/Harris2.png}
\caption{Harris corners detected (green crosses) in the image above and in the preceding image of the series.}
% \label{Harris}
\end{figure}
% In these two consecutive images, the IR camera scanned from top to bottom inside
% the vehicle and there is a ``clear'' (from our vision) overlapping part (left bottom
% part in the first and left top part in the second) in these two images. However,
% from the Harris corner detection results, all the features obtained could not be
% matched correctly.
\end{frame}

\begin{frame}{Image translation and stitching}
\begin{figure}
\centering
\vspace*{-10pt}
\includegraphics[scale=0.3]{img/ch3/img_trs.jpg}\\
\pause
\includegraphics[scale=0.3]{img/ch3/img_sti.jpg}
\end{figure}
\end{frame}

\begin{frame}{Temperature panorama}
\begin{figure}
\hspace*{-15pt}
\includegraphics[scale=0.222]{img/ch3/Pano_T_Final.jpg}
\end{figure}
\centering
Mean value: $ \overline{T} = 305.94$ K $= 32.79$ °C
\end{frame}

\begin{frame}{Heat flux meter}
\begin{figure}
\centering
\vspace*{-18pt}
\includegraphics[scale=0.31]{img/ch3/QIRT2015Aisa_Fig8.png}
\end{figure}
% \pause
Mean value after 10 hours (steady condition): $q_{ref}=11.460\; W/m^2$
\end{frame}

\begin{frame}{Heat flux panorama}
\begin{figure}
\hspace*{-15pt}
\includegraphics[scale=0.222]{img/ch3/Pano_Q_Final.jpg}
\end{figure}
\centering
Mean value: $\bar{q}=11.724\; W/m^2$
\end{frame}

\begin{frame}{Results \& discussion}
Therefore, the final K-value from IR thermography is:
\begin{equation*}
K_{th}=\frac{\bar{q}}{\Delta \bar{\theta}} =\frac{11.724\; W/m^2}{(32.79-7.5)\;K}=0.464\; W/(K\cdot m^2)
\end{equation*}
\pause
Comparing with the ATP standard:
\begin{table}[ht]
\centering
\caption{ATP test results.}
\begin{tabular}{l|r}
\hline
Fans Power [W] (Mean over 6 hours) & 144 \\ \hline
Heaters Power [W] (Mean over 6 hours) & 988\\ \hline
Internal temperature [°C] (Mean over 6 hours) & 32.5\\ \hline
External temperature [°C] (Mean over 6 hours) & 7.5\\ \hline
K-Value [W/(K m$^2$)] & 0.46 \\ \hline
\end{tabular}
% \label{ATP_res}
\end{table}
\pause
The error between these two results is then:
\begin{equation*}
e= \frac{|K_{th}-K|}{K}=\frac{0.464-0.46}{0.46}=0.0087=0.87\%
\end{equation*}
\end{frame}

\begin{frame}{Results \& discussion}
\begin{itemize}[<+->]
\pause
\large
\item The thermal resistance model works well
\item Favorable result for the panoramic view compared to the ATP test
\item The uniform, repetitive texture inside the truck made automatic feature detection and image stitching difficult
\item Image processing still needs improvement, with more advanced feature detection and description
\end{itemize}
\end{frame}
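% The following frame is an editorial addition: a brief implementation sketch of
% the spherical projection mapping shown earlier in this section. The Python/NumPy
% code is illustrative only; the function and variable names (spherical_warp, f,
% xc, yc) are assumptions and were not part of the original processing chain.
\begin{frame}[fragile]{Spherical projection -- implementation sketch}
A minimal NumPy sketch of the per-pixel mapping $(x,y)\mapsto(x',y')$ given above (names are illustrative):
\begin{verbatim}
import numpy as np

def spherical_warp(x, y, f, xc, yc):
    # forward mapping from planar image coordinates (x, y) to the
    # spherical image coordinates (x', y'), as on the formula slide
    xp = f * np.tan((x - xc) / f) + xc
    yp = f * np.tan((y - yc) / f) / np.cos((x - xc) / f) + yc
    return xp, yp
\end{verbatim}
\end{frame}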
{ "alphanum_fraction": 0.6218312524, "avg_line_length": 33.6687898089, "ext": "tex", "hexsha": "7d231b27e11855087081696181db89ff1ad5946c", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "aaa976e6c1c9bf932cd7cb44147a6a25a0537e39", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Crescent-Saturn/PhD_grind", "max_forks_repo_path": "beamer/chp3.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "aaa976e6c1c9bf932cd7cb44147a6a25a0537e39", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Crescent-Saturn/PhD_grind", "max_issues_repo_path": "beamer/chp3.tex", "max_line_length": 352, "max_stars_count": null, "max_stars_repo_head_hexsha": "aaa976e6c1c9bf932cd7cb44147a6a25a0537e39", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Crescent-Saturn/PhD_grind", "max_stars_repo_path": "beamer/chp3.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1548, "size": 5286 }
%$Id:$ \documentclass[10pt]{article} \usepackage{times} \usepackage{epsfig,verbatim} \usepackage{cite,mathptm} \begin{document} \title{Serverless architecture with Unikernels} \author{ Gunjan Patel\\ {\it [email protected]} } \maketitle \begin{abstract} {\it \input{abstract} Not {\bf in} the abstract file!!!!! } \end{abstract} \section{Introduction} \label{sec:intro} This is \S\ref{sec:intro}. \noindent In-line math mode: $y = x^{\sqrt{2^{x+y}}}_{\phi \Phi}$. An equation array: \begin{eqnarray} x & = & \sqrt{y} \\ & = & z^{y+d_{(i+j)}} \nonumber \\ & = & 12 \end{eqnarray} If using {\em this} template, please rename ``student'' in the file names to your login name in the cs.pitt.edu domain. That'll make it easier to manage multiple electronic submissions. To build this file do the following: \begin{verbatim} $ make pdf \end{verbatim} \section{Related Work} \label{sec:related} Remember to cite papers on related work, but also remember to cite papers that describe traces you use ({\it e.g.} Drew Roselli's technical report~\cite{roselli98}, when using the Berkeley traces). For your first use of this template ... just a single section is sufficient, don't worry about paper structure yet. \section{Conclusion} \label{sec:conclusion} %A comment in latex is preceded with a percentage sign %The following two lines specify the bibliography file(s) used % ... in this case student-doc.bib, and the style of the bibliography % ... in this case ieee.bst \bibliography{student-doc} \bibliographystyle{ieee} \end{document}
{ "alphanum_fraction": 0.7217613274, "avg_line_length": 20.3506493506, "ext": "tex", "hexsha": "2663d397810eefbedae7ccd8d6c857ee4a74c374", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "7d7dc7bd8afe862b269195706afa2d2ed86a2be3", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "gunjan5/Argo", "max_forks_repo_path": "doc/serverless-unikernels.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "7d7dc7bd8afe862b269195706afa2d2ed86a2be3", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "gunjan5/Argo", "max_issues_repo_path": "doc/serverless-unikernels.tex", "max_line_length": 78, "max_stars_count": null, "max_stars_repo_head_hexsha": "7d7dc7bd8afe862b269195706afa2d2ed86a2be3", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "gunjan5/Argo", "max_stars_repo_path": "doc/serverless-unikernels.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 447, "size": 1567 }
%!TEX root = inversion.tex
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Copyright (c) 2003-2018 by The University of Queensland
% http://www.uq.edu.au
%
% Primary Business: Queensland, Australia
% Licensed under the Apache License, version 2.0
% http://www.apache.org/licenses/LICENSE-2.0
%
% Development until 2012 by Earth Systems Science Computational Center (ESSCC)
% Development 2012-2013 by School of Earth Sciences
% Development from 2014 by Centre for Geoscience Computing (GeoComp)
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{DC resistivity inversion: 3D}\label{sec:forward DCRES}
This section will discuss DC resistivity\index{DC forward} forward modelling, as well as the \escript classes which allow these forward problems to be solved. The DC resistivity forward problem is modelled via the application of Ohm's Law to the flow of current through the ground. When sources are treated as point sources and Ohm's Law is written in terms of the potential field, the equation becomes:
\begin{equation} \label{ref:dcres:eq1}
\nabla \cdot (\sigma \nabla \phi) = -I \delta(x-x_s) \delta(y-y_s) \delta(z-z_s)
\end{equation}
Where $(x,y,z)$ and $(x_s, y_s, z_s)$ are the coordinates of the observation and source points respectively. The total potential, $\phi$, is split into primary and secondary potentials $\phi = \phi_p + \phi_s$, where the primary potential is calculated analytically for a flat half-space background model with conductivity $\sigma_p$. The secondary potential is due to conductivity deviations from the background model and has its conductivity denoted as $\sigma_s$. This approach effectively removes the singularities of the Dirac delta source and provides more accurate results \cite{rucker2006three}.

An analytical solution is available for the primary potential of a uniform half-space due to a single pole source and is given by:
\begin{equation} \label{ref:dcres:eq2}
\phi_p = \frac{I}{2 \pi \sigma_1 R}
\end{equation}
Where $I$ is the current and $R$ is the distance from the observation points to the source. In \escript the observation points are the nodes of the domain and $R$ is given by
\begin{equation} \label{ref:dcres:eq3}
R = \sqrt{(x-x_s)^2+(y-y_s)^2 + z^2}
\end{equation}
The secondary potential, $\phi_s$, is given by
\begin{equation}\label{ref:dcres:eq4}
-\mathbf{\nabla}\cdot\left(\sigma\,\nabla \phi_s \right) = \mathbf{\nabla}\cdot\left( \left(\sigma_p-\sigma\right)\,\nabla \phi_p \right)
\end{equation}
where $\sigma_p$ is the conductivity of the background half-space. The weak form of the above PDE is obtained by multiplying by a suitable test function, $w$, and integrating over the domain $\Omega$:
\begin{multline}\label{ref:dcres:eq5}
-\int_{\partial\Omega} \sigma\,\nabla \phi_s \cdot \hat{n} w\,ds + \int_{\Omega} \sigma\,\nabla \phi_s \cdot \nabla w\,d\Omega =\\
-\int_{\partial\Omega} \left(\sigma_p-\sigma\right)\,\nabla \phi_p \cdot \hat{n} w\,ds + \int_{\Omega} \left(\sigma_p-\sigma\right)\,\nabla \phi_p \cdot \nabla w\,d\Omega
\end{multline}
The integrals over the domain boundary provide the boundary conditions, which are implemented as Dirichlet conditions (i.e. zero potential) at all interfaces except the top, where Neumann conditions apply (i.e. no current flux through the air-earth interface).
From the integrals over the domain, the \escript coefficients can be deduced: the left-hand side conforms to \escript coefficient $A$, whereas the right-hand side agrees with the coefficient $X$ (see User Guide).

A number of different configurations for the electrode set-up are available \cite[pg 5]{LOKE2014}. An \escript class is provided for each of the following survey types:
\begin{itemize}
\item Wenner alpha
\item Pole-Pole
\item Dipole-Dipole
\item Pole-Dipole
\item Schlumberger
\end{itemize}
These configurations consist of at least one pair of current and potential electrodes separated by a distance $a$. In those configurations which use $n$, electrodes in the currently active set may be separated by $na$. In the classes that follow, the specified value of $n$ is an upper limit: $n$ will start at 1 and iterate up to the value specified.

\subsection{Usage}
The DC resistivity forward modelling classes are specified as follows:

\begin{classdesc}{WennerSurvey}{self, domain, primaryConductivity, secondaryConductivity, current, a, midPoint, directionVector, numElectrodes}
\end{classdesc}

\begin{classdesc}{polepoleSurvey}{domain, primaryConductivity, secondaryConductivity, current, a, midPoint, directionVector, numElectrodes}
\end{classdesc}

\begin{classdesc}{DipoleDipoleSurvey}{self, domain, primaryConductivity, secondaryConductivity, current, a, n, midPoint, directionVector, numElectrodes}
\end{classdesc}

\begin{classdesc}{PoleDipoleSurvey}{self, domain, primaryConductivity, secondaryConductivity, current, a, n, midPoint, directionVector, numElectrodes}
\end{classdesc}

\begin{classdesc}{SchlumbergerSurvey}{self, domain, primaryConductivity, secondaryConductivity, current, a, n, midPoint, directionVector, numElectrodes}
\end{classdesc}

\noindent Where:
\begin{itemize}
\item \texttt{domain} is the domain which represents the half-space of interest. It is important that nodes exist at the points where the electrodes will be placed.
\item \texttt{primaryConductivity} is a data object which defines the primary conductivity. It should be defined on the ContinuousFunction function space.
\item \texttt{secondaryConductivity} is a data object which defines the secondary conductivity. It should be defined on the ContinuousFunction function space.
\item \texttt{current} is the value of the injection current to be used, in amps. This is currently a constant.
\item \texttt{a} is the electrode separation distance.
\item \texttt{n} is the electrode separation distance multiplier.
\item \texttt{midPoint} is the centre of the survey. Electrodes spread out from this point in the direction defined by the direction vector and in the opposite direction, placing half of the electrodes on either side.
\item \texttt{directionVector} defines the direction in which electrodes are spread.
\item \texttt{numElectrodes} is the number of electrodes to be used in the survey.
\end{itemize}

When calculating the potentials, the survey is moved along the set of electrodes. The process of moving the electrodes along is repeated for each consecutive value of $n$. As $n$ increases, fewer potentials are calculated; this is because a greater spacing is required and hence some electrodes are skipped. The process of building up these pseudo-sections is covered in greater depth by Loke (2014)~\cite[pg 19]{LOKE2014}.

These classes all share the common member functions described below. For the surveys where $n$ is not specified only one list will be returned.
\begin{methoddesc}[]{getPotential}{}
Returns three lists containing the primary, secondary and total potential differences, respectively. Each of the lists contains $n$ sublists.
\end{methoddesc}

\begin{methoddesc}[]{getElectrodes}{}
Returns a list containing the positions of the electrodes.
\end{methoddesc}

\begin{methoddesc}[]{getApparentResistivityPrimary}{}
Returns a series of lists containing primary apparent resistivities, one for each value of $n$.
\end{methoddesc}

\begin{methoddesc}[]{getApparentResistivitySecondary}{}
Returns a series of lists containing secondary apparent resistivities, one for each value of $n$.
\end{methoddesc}

\begin{methoddesc}[]{getApparentResistivityTotal}{}
Returns a series of lists containing total apparent resistivities, one for each value of $n$. This is generally the result of interest.
\end{methoddesc}

The apparent resistivities are calculated by applying a geometric factor to the measured potentials.
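As an illustration, a survey might be constructed and evaluated along the following lines. This is a sketch only: the import path, the creation of \texttt{domain} and of the conductivity data objects, and all numeric values are assumptions; only the constructor arguments and the member functions described above are taken from this section.

\begin{verbatim}
# Hypothetical usage sketch: the import paths, the creation of `domain`,
# and all numeric values below are assumptions for illustration only.
from esys.downunder import SchlumbergerSurvey
from esys.escript import ContinuousFunction, Scalar

# `domain` is assumed to be an escript domain whose nodes coincide with
# the planned electrode positions (built elsewhere with a mesh generator).
sigma_p = Scalar(0.01, ContinuousFunction(domain))   # background conductivity
sigma   = Scalar(0.01, ContinuousFunction(domain))   # secondary conductivity

survey = SchlumbergerSurvey(domain, sigma_p, sigma,
                            current=1.0, a=5.0, n=4,
                            midPoint=[250., 250.],        # format assumed
                            directionVector=[1., 0.],     # format assumed
                            numElectrodes=40)

rho_total = survey.getApparentResistivityTotal()  # one list per value of n
\end{verbatim}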
{ "alphanum_fraction": 0.7509052316, "avg_line_length": 51.6709677419, "ext": "tex", "hexsha": "695e143aa81bd5052d930e14ec74dd5e36b62db3", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "0023eab09cd71f830ab098cb3a468e6139191e8d", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "markendr/esys-escript.github.io", "max_forks_repo_path": "doc/inversion/ForwardDCRES.tex", "max_issues_count": 1, "max_issues_repo_head_hexsha": "0023eab09cd71f830ab098cb3a468e6139191e8d", "max_issues_repo_issues_event_max_datetime": "2019-01-14T03:07:43.000Z", "max_issues_repo_issues_event_min_datetime": "2019-01-14T03:07:43.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "markendr/esys-escript.github.io", "max_issues_repo_path": "doc/inversion/ForwardDCRES.tex", "max_line_length": 131, "max_stars_count": null, "max_stars_repo_head_hexsha": "0023eab09cd71f830ab098cb3a468e6139191e8d", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "markendr/esys-escript.github.io", "max_stars_repo_path": "doc/inversion/ForwardDCRES.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2023, "size": 8009 }
\documentclass[12pt,letterpaper]{article} \usepackage{amsmath} \usepackage[pdftex]{graphicx} % Load hyperref package without putting lines around hyperlinks. %\usepackage[hidelinks]{hyperref} \PassOptionsToPackage{hyphens}{url}\usepackage[hidelinks]{hyperref} \usepackage{color} \usepackage{xcolor} \usepackage{xspace} \usepackage{anysize} \usepackage{setspace} \usepackage{multicol} % This allows multiple columns \usepackage[nottoc,numbib]{tocbibind} % This makes refs a section \usepackage[pagewise]{lineno} \usepackage{tcolorbox} % For making boxes around text. \usepackage[xindy,toc]{glossaries} % Must come after hyperref package. %\usepackage{url} \usepackage[T1]{fontenc} % Makes > not typeset as inverted question mark. % Remove the "References" header from the bibliography. \usepackage{etoolbox} \patchcmd{\thebibliography}{\section*{\refname}}{}{}{} % Run external commands to generate a tex file containing the output of the % 'amoebae -h' command. \immediate\write18{bash generate_amoebae_help_output_text.sh} % Import stuff for formatting citations. \usepackage{natbib} % Format paragraphs. %\setlength{\parskip}{\baselineskip}% %\setlength{\parindent}{0pt}% \setlength{\parindent}{0em} \setlength{\parskip}{1em} %\renewcommand{\baselinestretch}{2.0} % Define command for wrapping large words. \newcommand*\wrapletters[1]{\wr@pletters#1\@nil} \def\wr@pletters#1#2\@nil{#1\allowbreak\if&#2&\else\wr@pletters#2\@nil\fi} % Use the listings package to automatically wrap text (unlike with just using % verbatim). \usepackage{listings} \lstset{ basicstyle=\small\ttfamily, columns=flexible, breaklines=true, keepspaces=true } %% Format section headers (this is not ideal when there are many short %%subsections. %\usepackage[tiny]{titlesec} %\titlespacing\subsection{0pt}{12pt plus 4pt minus 2pt}{0pt plus 2pt minus 2pt} %\titlespacing\subsubsection{0pt}{12pt plus 4pt minus 2pt}{0pt plus 2pt minus 2pt} %\titlespacing\subsubsection{0pt}{12pt plus 4pt minus 2pt}{0pt plus 2pt minus %2pt} \marginsize{2.5 cm}{2.5 cm}{1 cm}{1 cm} % Works out to one inch margins. %\parindent 1cm \graphicspath{{figures/}} %\pagenumbering{arabic} \pagenumbering{roman} \makeglossaries \begin{document} \begin{titlepage} \centering {\huge AMOEBAE command line interface documentation\par} \vspace{2cm} {\Large Lael D. Barlow\par} \vfill {\large Version of \today\par} \end{titlepage} %Optional table of contents. \newpage \tableofcontents \newpage \pagenumbering{arabic} % Start line numbers on this page \begin{linenumbers} \section{Command reference} Documentation for each AMOEBAE command and the various options may be accessed from the command line via the "-h" options. The following command reference information is the output of running amoebae (and each command) with the "-h" option. % Import tex file output from the generate_amoebae_help_output_text.sh. \input{amoebae_help_output.tex} %\printglossaries % https://en.wikibooks.org/wiki/LaTeX/Glossary \newpage % End line numbering. \end{linenumbers} %% The unsrt style orders references by appearance, but puts given names first. %% The plain style orders references alphabetically, but puts surnames first. %\bibliographystyle{laelstyle5} %\begin{multicols}{2} %{\footnotesize % The footnotesize command makes the text smaller. %\bibliography{references/AMOEBAE}} %\end{multicols} \end{document}
{ "alphanum_fraction": 0.7712938403, "avg_line_length": 28.041322314, "ext": "tex", "hexsha": "d3dcb2e7011bec45ea9d98d599d7d56929a64216", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-07-31T21:21:15.000Z", "max_forks_repo_forks_event_min_datetime": "2020-07-31T21:21:15.000Z", "max_forks_repo_head_hexsha": "3c6607bcb64a60baee2f19f0a25e14b325e9725d", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "laelbarlow/amoebae", "max_forks_repo_path": "documentation/AMOEBAE_documentation.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "3c6607bcb64a60baee2f19f0a25e14b325e9725d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "laelbarlow/amoebae", "max_issues_repo_path": "documentation/AMOEBAE_documentation.tex", "max_line_length": 82, "max_stars_count": 8, "max_stars_repo_head_hexsha": "3c6607bcb64a60baee2f19f0a25e14b325e9725d", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "laelbarlow/amoebae", "max_stars_repo_path": "documentation/AMOEBAE_documentation.tex", "max_stars_repo_stars_event_max_datetime": "2021-11-28T08:32:05.000Z", "max_stars_repo_stars_event_min_datetime": "2020-07-16T21:36:38.000Z", "num_tokens": 1011, "size": 3393 }
% Use only LaTeX2e, calling the article.cls class and 12-point type.
\documentclass[12pt,a4paper]{article}
\usepackage[english]{babel}
\usepackage[utf8]{inputenc}
\usepackage[parfill]{parskip}
\usepackage{textcomp}
\usepackage{hyperref}
\usepackage{listings}
\usepackage{color}
\usepackage{graphicx}
\usepackage{lscape}

% Include your paper's title here
\title{IBMCNX Scripting Documentation}
\author {Christoph Stöttner\\
\normalsize{E-mail: [email protected]}\\
\normalsize{Blog: http://www.stoeps.de}
}

\lstset{language=Python}
% Define the listing styles referenced throughout this document
\lstdefinestyle{Python}{language=Python}
\lstdefinestyle{python}{language=Python}
\lstdefinestyle{BashInputStyle}{language=bash}
\lstdefinestyle{command.com}{basicstyle=\ttfamily}

\date{}

\begin{document}
% Make the title.
\maketitle

\begin{abstract}
This document is the main documentation of the ibmcnxscripting project. You will find tips around Jython and how you can create your own scripts for IBM\textsuperscript{\textregistered} Connections and IBM\textsuperscript{\textregistered} WebSphere\textsuperscript{\textregistered} Application Server Administration.
\end{abstract}

\pagenumbering{Roman}
\newpage
\tableofcontents
\newpage
\pagenumbering{arabic}

\section{Jython}
Jython is the Java implementation of Python. I think it is easy to learn and very powerful! Jython and Python are interpreted languages, so you can test your code in the Jython console or in wsadmin. Start Jython and type your code; it is easy to test and you will get fast results.

There are some good online resources and books to learn the basics of Jython or Python:

\subsection{Learning Resources}
\subsubsection{Online materials}
\paragraph{Online Learning (Python)}
You can use Python learning materials for your first steps! Learning Jython is very similar.
\begin{itemize}
\item \url{http://www.codecademy.com/}
\item \url{http://learnpythonthehardway.org/book/}
\end{itemize}
\paragraph{Reference}
\begin{itemize}
\item \url{http://www.jython.org/jythonbook/en/1.0/}
\item \url{http://www.jython.org/docs/index.html}
\end{itemize}

\subsubsection{Books}
WebSphere Application Server Administration Using Jython (2009)\\
Authors: Robert A. Gibson, Arthur Kevin McGrath and Noel J. Bergman

The Definitive Guide to Jython: Python for the Java Platform (2010)\\
Authors: Josh Juneau, Frank Wierzbicki, Leo Soto and Victor Ng

\subsection{Language elements}
Grouping in Jython is done with an indentation of four spaces! Please do not use tabs, because I had several issues with code errors on Windows when I used tab grouping.

\subsubsection{Comments}
Comments in Jython start with \# and all text after this sign will be ignored!
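For example (a minimal illustration; the variable name is arbitrary):
\begin{lstlisting}[style=Python]
# This whole line is ignored by the interpreter
dbs = ['activities', 'blogs']  # everything after the hash sign is a comment
\end{lstlisting}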
\subsubsection{Variables}
\begin{lstlisting}[style=Python]
# Defining a String
x = 'Hello World'
x = "Hello World Two"

# Defining an integer
y = 10

# Float
z = 8.75

# Complex
i = 1 + 8.07j

# Multiple assignment
x, y, z = 1, 2, 3
\end{lstlisting}

\subsubsection{Ranges}
\begin{lstlisting}[style=Python]
wsadmin>range(7)
[0, 1, 2, 3, 4, 5, 6]

# Include a step in the range
wsadmin>range(0,10,3)
[0, 3, 6, 9]

# Good base for loops
wsadmin>range(1,11)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
wsadmin>range(20,27)
[20, 21, 22, 23, 24, 25, 26]
\end{lstlisting}

\subsubsection{Lists}
\begin{lstlisting}[style=Python]
# List
wsadmin>dbs = ['activities','blogs','communities','dogear']
wsadmin>dbs[1]
'blogs'
\end{lstlisting}

\subsubsection{Dictionaries}
\begin{lstlisting}[style=Python]
# Dictionary with Performance Data
wsadmin>minConnections = {'activities':1,'blogs':1}
wsadmin>maxConnections = {'activities':50,'blogs':250}
wsadmin>maxConnections
{'activities': 50, 'blogs': 250}
wsadmin>maxConnections.keys()
['activities', 'blogs']
wsadmin>maxConnections.values()
[50, 250]
wsadmin>maxConnections['blogs']
250
\end{lstlisting}

\subsubsection{if - elif - else}
\textbf{Remember:} Grouping of elements is done with an indent of 4 spaces!
\begin{lstlisting}[style=Python]
# Basic
if condition :
    # print or do something
elif other condition :
    # print or do something other
else :
    # print or do completely different
\end{lstlisting}

\begin{lstlisting}[style=Python]
# Example
if value.find( 'CLFWY0217E' ) > -1 :
    print "\t\tuser already converted"
elif value.find( 'CLFWY0212E' ) > -1 :
    print "\t\tuser not found in database"
elif value.find( 'CLFWY0209E' ) > -1 :
    print "\t\tnew identifier '" + data[1] + "' does not exist."
else :
    print '\t\tException value: ' + value
\end{lstlisting}

\subsubsection{Error handling}
You can catch errors with try and except.
\begin{lstlisting}[style=python]
try:
    # perform some commands which can raise an exception
except Exception, value:
    # perform exception handling
finally:
    # perform task which must always be completed
\end{lstlisting}

\section{WebSphere Application Server}
After installing IBM Connections, lots of configuration must be done through the ISC (Integrated Solutions Console). The ISC is a good GUI with lots of menus and sometimes long click paths. Configuration is possible through \texttt{wsadmin} too, but it is hard to find the necessary commands.

\subsection{Console Preferences}

\subsection{wsadmin}
wsadmin uses long case-sensitive commands with mostly case-sensitive parameters. They are hard to remember, and on Linux\textsuperscript{\textregistered} there is no history to recall commands. Always start \texttt{wsadmin} in \lstinline[style=BashInputStyle]{WAS_HOME/profiles/Dmgr01/bin}!

\subsubsection{Start wsadmin}
\paragraph{Windows\textsuperscript{\textregistered}:}
\lstinline[style=command.com]{wsadmin.bat -lang jython -username wasadmin -password password}

\paragraph{Linux and AIX\textsuperscript{\textregistered}:}
\lstinline[style=BashInputStyle]{./wsadmin.sh -lang jython -username wasadmin -password password}

\subsubsection{Set Jython as the default language for wsadmin}
You can set the default language which is used with wsadmin, so you can save some characters when typing.
\begin{lstlisting}[style=BashInputStyle]
edit WAS_HOME/profiles/Dmgr01/properties/wsadmin.properties
\end{lstlisting}

\paragraph{Default:}
\texttt{com.ibm.ws.scripting.defaultLang=JACL}

\paragraph{Set Jython:}
\texttt{com.ibm.ws.scripting.defaultLang=jython}

\subsection{Wsadmin commands}
There are five management objects within wsadmin.
These objects group the commands for the different MBean servers.
\begin{itemize}
\item AdminConfig
\item AdminApp
\item AdminTask
\item AdminControl
\item Help
\end{itemize}

\includegraphics[keepaspectratio=true,width=\textwidth]{wsadmin-objects.png}

\subsubsection{AdminApp Object}
Use the AdminApp object to
\begin{itemize}
\item install and uninstall applications
\item list applications
\item edit applications or modules
\end{itemize}

\paragraph{Examples}
\begin{itemize}
\item List all applications
\begin{itemize}
\item \lstinline{print AdminApp.list()}
\item \lstinline{AdminApp.list()}
\item \lstinline{list=AdminApp.list().split('\n')}
\end{itemize}
\item Change options of applications
\begin{itemize}
\item \lstinline{AdminApp.edit('appname',['options'])}
\end{itemize}
\end{itemize}

\subsubsection{AdminConfig}
\begin{itemize}
\item Manages the configuration information that is stored in the repository.
\item Example: change the min- and maxConnections of the blogs DataSource.
\end{itemize}

\begin{landscape}
\begin{lstlisting}
wsadmin>AdminConfig.getid('/DataSource:blogs/')
'blogs(cells/cnxwas1Cell01|resources.xml#DataSource_1371479885975)'
wsadmin>dataSource1=AdminConfig.getid('/DataSource:blogs/')
wsadmin>print AdminConfig.show(dataSource1)
[authDataAlias blogsJAASAuth]
[authMechanismPreference BASIC_PASSWORD]
[connectionPool (cells/cnxwas1Cell01|resources.xml#ConnectionPool_1384252180672)]
[datasourceHelperClassname com.ibm.websphere.rsadapter.DB2UniversalDataStoreHelper]
[description "Blogs DB2 DataSource"]
[...]
[jndiName jdbc/rollerdb]
[name blogs]
[...]
[provider blogsJDBC(cells/cnxwas1Cell01|resources.xml#JDBCProvider_1371479882710)]
[providerType "DB2 Universal JDBC Driver Provider"]
[statementCacheSize 100]
wsadmin>AdminConfig.modify( dataSource1, '[[statementCacheSize 50]]')
''
wsadmin>AdminConfig.modify( dataSource1, '[[connectionPool [[minConnections 10] [maxConnections 100]]]]' )
''
wsadmin>AdminConfig.save()
''
\end{lstlisting}
\end{landscape}

\section{IBM Connections wsadmin commands}
Each application needs its own commands:

\texttt{execfile("connectionsConfig.py")}\\
\texttt{execfile("activitiesAdmin.py")}\\
...

\subsection{Load all administrative commands on startup}
Please be careful when you use this tip: in multi-cluster environments, each \texttt{execfile("appnameAdmin.py")} will ask you to select the node where it will run.
You can create a script with all commands for preloading: \texttt{loadAll.py:} \begin{lstlisting} execfile('connectionsConfig.py') execfile("activitiesAdmin.py") execfile("blogsAdmin.py") execfile("communitiesAdmin.py") execfile("dogearAdmin.py") execfile("filesAdmin.py") execfile("forumsAdmin.py") execfile("homepageAdmin.py") execfile("newsAdmin.py") execfile("profilesAdmin.py") execfile("wikisAdmin.py") \end{lstlisting} Load this script at wsadmin startup:\\ \lstinline{./wsadmin.sh -lang jython -profile loadAll.py} \appendix \section{Linux Bash Goodies} \begin{landscape} \subsection{.bashrc} \begin{lstlisting} # Insert ulimit for file limit ulimit -n 64000 # set WAS_HOME export WAS_HOME=/opt/IBM/WebSphere/AppServer # set profileNames dmgrProfile=Dmgr01 appSrvProfile=AppSrv01 alias dmgrBin='cd $WAS_HOME/profiles/$dmgrProfile/bin' alias wsadmin='cd $WAS_HOME/profiles/$dmgrProfile/bin;./wsadmin.sh -lang jython' alias nodeBin='cd $WAS_HOME/profiles/$appSrvProfile/bin' alias startNode='$WAS_HOME/profiles/$appSrvProfile/bin/startNode.sh' alias startDmgr='$WAS_HOME/bin/startManager.sh' alias stopNode='$WAS_HOME/profiles/$appSrvProfile/bin/stopNode.sh' alias stopDmgr='$WAS_HOME/bin/stopManager.sh' alias nodeLog='tail -f $WAS_HOME/profiles/$appSrvProfile/logs/nodeagent/SystemOut.log' alias InfraLog='tail -f $WAS_HOME/profiles/$appSrvProfile/logs/InfraCluster_server1/SystemOut.log' alias Cluster1Log='tail -f $WAS_HOME/profiles/$appSrvProfile/logs/Cluster1_server1/SystemOut.log' alias Cluster2Log='tail -f $WAS_HOME/profiles/$appSrvProfile/logs/Cluster2_server1/SystemOut.log' \end{lstlisting} \end{landscape} \end{document}
{ "alphanum_fraction": 0.7699365544, "avg_line_length": 28.3011049724, "ext": "tex", "hexsha": "cc11350833ec62c607662de50b3b830329e4a27b", "lang": "TeX", "max_forks_count": 17, "max_forks_repo_forks_event_max_datetime": "2021-03-18T13:39:49.000Z", "max_forks_repo_forks_event_min_datetime": "2015-09-18T16:27:14.000Z", "max_forks_repo_head_hexsha": "d433c6a71964c3743c5fc43e78fdbb0429d0e7bd", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "stoeps13/ibmcnxscripting", "max_forks_repo_path": "documentation/ibmcnxscript.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "d433c6a71964c3743c5fc43e78fdbb0429d0e7bd", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "stoeps13/ibmcnxscripting", "max_issues_repo_path": "documentation/ibmcnxscript.tex", "max_line_length": 315, "max_stars_count": 19, "max_stars_repo_head_hexsha": "d433c6a71964c3743c5fc43e78fdbb0429d0e7bd", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "stoeps13/ibmcnxscripting", "max_stars_repo_path": "documentation/ibmcnxscript.tex", "max_stars_repo_stars_event_max_datetime": "2021-09-19T12:26:43.000Z", "max_stars_repo_stars_event_min_datetime": "2015-05-08T12:44:03.000Z", "num_tokens": 2864, "size": 10245 }
%!TEX root = ../report.tex
%
% Objectives
%
\section{Objectives}
\label{sec:objectives}
% ~1 page
%Clearly explain the project objectives.

% Develop a framework that allows...
% Develop an app for a smart place, abstracting
% beacons, think only in POI and their meaning
% Users only need one app for any smart place
% The same app can be used, for space owners, to install apps

We propose to build a framework to develop proximity-based web mobile apps, or, according to our definition of Smart Place, web mobile apps for Smart Places. Part of the framework will need to run on the user's mobile device. The other part will run in the device's browser, and it is the part that developers will have access to.

One of the main goals is to implement the framework itself. Also, a Smart Place will be created and two versions of a simple app for it will be developed, one using our approach and one using an SDK to interact with the beacons. We want to test how much our solution improves the development of mobile apps for Smart Places. Also, we want to make these Smart Places accessible to anyone who has a mobile device with at least Bluetooth 4.0, without the need to install a large number of apps.

\subsection{Developers}
\label{sub:developers}

In this project, we want to create a solution that gives developers abstractions for the technology of the POIs and the backend. The main goal is to allow them to develop mobile apps in such a way that they do not need to write any code to get identifiers from beacons or to get data from a backend.

Depending on the POI the user is near, we aim to allow developers to perform any computation, instead of just redirecting the user to a URL. For instance, in the restaurant scenario in section \ref{sub:bluetooth_low_energy}, developers would write the code to get the number of the table where the user is and place the order. They would not need to know how to perform the mapping between beacons and tables.

\subsection{Owners}
\label{sub:owners}

Another goal we want to achieve is to allow owners of Smart Places to configure the POIs using just one app. All they would need to do is deploy the BLE Beacons that they want in their Smart Places and use their mobile devices to choose which web app will ``run'' there and what the meaning of each POI is.

% Given that, we have two main objectives. First, the framework
% itself and second, the Smart Places App.

% The framework should allow developers to develop their apps,
% without needing to write the code to get the beacons' signals
% and get more information from the backend. That would be the
% framework's job. Developers would only need to write the code
% that will run in each POI. Each POI would have a name and
% some parameters specified by the developer.
% For instance, in the example of the restaurant,
% in \ref{sec:introduction}, the POI could have the name Table and
% a parameter could be the number of the table.

% Developers would only need to write their apps using web
% technologies, such as HTML and JavaScript.

% Part of the framework will run on the users' smartphones.
% We will develop the Smart Places App, for Android, which would
% be an app that will scan for beacons, and will get the
% corresponding information about the POIs that are being
% represented by the scanned beacons.
% The user will be notified about the POIs
% that were found and then, he can select one of them and
% start using the associated app.
% Also, the smart place owner, would be able to use this app % to install apps, developed using this framework, in his % smart place. % To summarize, the framework will allow developers to develop % apps for smart places using nothing more than web technologies % and without writing the code to scan nearby beacons. % The Smart Places App will allow the users to access any app, % developed using this framework, in any smart place. Also, % the smart places owner could install apps in their spaces % using this same app.
{ "alphanum_fraction": 0.7753117207, "avg_line_length": 41.3402061856, "ext": "tex", "hexsha": "31cf1de194390a5daa6cc31902e8236318141b93", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "67ff67c69a4208146ac37cdae0bb4140ee794897", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "samfcmc/master-project", "max_forks_repo_path": "sections/4-objectives.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "67ff67c69a4208146ac37cdae0bb4140ee794897", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "samfcmc/master-project", "max_issues_repo_path": "sections/4-objectives.tex", "max_line_length": 94, "max_stars_count": null, "max_stars_repo_head_hexsha": "67ff67c69a4208146ac37cdae0bb4140ee794897", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "samfcmc/master-project", "max_stars_repo_path": "sections/4-objectives.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 933, "size": 4010 }
\chapter{Values and Variables} \label{VV-Chapter} \textsc{Perspective}: No feature of the Icon programming language has a greater impact on the implementation than untyped variables --- variables that have no specific type associated with them. This feature originated in Icon's predecessors as a result of a desire for simplicity and flexibility. The absence of type declarations reduces the amount that a programmer has to learn and remember. It also makes programs shorter and (perhaps) easier to write. The flexibility comes mainly from the support for heterogeneous aggregates. A list, for example, can contain a mixture of strings, integers, records, and other lists. There are numerous examples of Icon programs in which this flexibility leads to programming styles that are concise and simple. Similarly, {\textquotedbl}generic{\textquotedbl} procedures, whose arguments can be of any type, often are useful, especially for modeling experimental language features. While these facilities can be provided in other ways, such as by C's union construct, Icon provides them by the \textit{absence} of features, which fits with the philosophy of making it easy to write good programs rather than hard to write bad ones. The other side of the coin is that the lack of type declarations for variables makes it impossible for the translator to detect most type errors and defers type checking until the program is executed. Thus, a check that can be done only once at translation time in a language with a strong compile-time type system must be done repeatedly during program execution in Icon. Furthermore, just as the Icon translator cannot detect most type errors, a person who is writing or reading an Icon program does not have type declarations to help clarify the intent of the program. Icon also converts arguments to the expected type where possible. This feature is, nevertheless, separable from type checking; Icon could have the latter without the former. However, type checking and conversion are naturally intertwined in the implementation. As far as the implementation is concerned, untyped variables simplify the translator and complicate the run-time system. There is little the translator can do about types. Many operations are polymorphic, taking arguments of different types and sometimes performing significantly different computations, depending on those types. Many types are convertible to others. Since procedures are data values and may change meaning during program execution, there is nothing the translator can know about them. For this reason, the translator does not attempt any type checking or generate any code for type checking or conversion. All such code resides in the run-time routines for the functions and operations themselves. There is a more subtle way in which untyped variables influence the implementation. Since any variable can have any type of value at any time, and can have different types of values at different times, all values must be the same size. Furthermore, Icon's rich repertoire of data types includes values of arbitrary size-lists, tables, procedures, and so on. The solution to this problem is the concept of a \textit{descriptor}, which either contains the data for the value, if it is small enough, or else contains a pointer to the data if it is too large to fit into a descriptor. The trick, then, is to design descriptors for all of Icon's data types, balancing considerations of size, ease of type testing, and efficiency of accessing the actual data. 
\section{Descriptors} Since every Icon value is represented by a descriptor, it is important that descriptors be as small as possible. On the other hand, a descriptor must contain enough information to determine the type of the value that it represents and to locate the actual data. Although values of some types cannot possibly fit into any fixed-size space, it is desirable for frequently used, fixed-sized values, such as integers, to be stored in their descriptors. This allows values of these types to be accessed directly and avoids the need to provide storage elsewhere for such values. \PrimaryIndexBegin{Descriptors!v-word} \PrimaryIndexBegin{Descriptors!d-word} If Icon were designed to run on only one kind of computer, the size and layout of the descriptor could be tailored to the architecture of the computer. Since the implementation is designed to run on a wide range of computer architectures, Icon takes an approach similar to that of C. Its descriptor is composed of ``words,'' which are closely related to the concept of a word on the computer on which Icon is implemented. One word is not large enough for a descriptor that must contain both type information and an integer or a pointer. Therefore, a descriptor consists of two words, which are designated as the \textit{d-word} and the \textit{v-word}, indicating that the former contains descriptive information, while the latter contains the value \begin{picture}(300,50) \put(100,10){\dvbox{}{}{}} \put(100,10){\rightboxlabels{d-word}{v-word}} \end{picture} The dotted line between the two words of a descriptor is provided for readability. A descriptor is merely two words, and the fact that these two words constitute a descriptor is a matter of context. The v-word of a descriptor may contain either a value, such as an integer, or a pointer to other data. In C terms. the v-word may contain a variety of types, including both \texttt{int}s and pointers. On many computers, C \texttt{int}s and C pointers are the same size. For some computers, however, C compilers have a memory-model in which integers are smaller than pointers, which must allow access to a large amount of memory. In this situation, the C \texttt{long} or \texttt{long long} type are the same size as C pointers. There are computers with many different word sizes, but the main considerations in the implementation of Icon are the accommodation of computers with 32- and 64-bit words and the large-memory model, in which pointers are larger than integers. In the large-memory model, a v-word must accommodate the largest of the types. The d-words of descriptors contain a type code (a small integer) in their least significant bits and flags in their most significant bits. There are twelve type codes that correspond to source-language data types: \PrimaryIndexEnd{Descriptors!d-word} \PrimaryIndexEnd{Descriptors!v-word} \begin{tabular}{l@{\hspace{1in}}l} \textit{data type} & \textit{type code} \\ null & \texttt{null}\\ integer & \texttt{integer} or \texttt{long}\\ real number & \texttt{real}\\ cset & \texttt{cset}\\ file & \texttt{file}\\ procedure & \texttt{proc}\\ list & \texttt{list}\\ set & \texttt{set}\\ table & \texttt{table}\\ record & \texttt{record}\\ co-expression & \texttt{coexpr}\\ \end{tabular} Other type codes exist for internal objects, which are on a par with source-language objects, from an implementation viewpoint, but which are not visible at the source-language level. 
The actual values of these codes are not important, and they are indicated in diagrams by their type code names.

\subsection{Strings}

There is no type code for strings. They have a special representation in which the d-word contains the length of the string (the number of characters in it) and the v-word points to the first character in the string:

\begin{center}
\begin{picture}(200,32)
\put(0,0){\dvboxptr{\textit{n}}{}{50}{first character}}
\put(0,0){\trboxlabel{length}}
\end{picture}
\end{center}

String descriptors are called \textit{qualifiers}. In order to make qualifiers more intelligible in the diagrams that follow, a pointer to a string is followed by the string in quotation marks rather than by an address. For example, the qualifier for \texttt{{\textquotedbl}hello{\textquotedbl}} is depicted as

\begin{center}
\begin{picture}(200,32)
\put(0,0){\dvboxptr{5}{}{50}{"hello"}}
\put(0,0){\trboxlabel{length}}
\end{picture}
\end{center}

In order to distinguish qualifiers from other descriptors with type codes that might be the same as a string length, all descriptors that are not qualifiers have an \texttt{n} flag in the most significant bit of the d-word. The d-words of qualifiers do not have this \texttt{n} flag, and string lengths are restricted to prevent their overflow into this flag position, the most significant bit of a 32- or 64-bit d-word.

\subsection{The Null Value}

A descriptor for the null value has the form

\begin{center}
\begin{picture}(200,32)
\put(0,0){\dvbox{null}{n}{0}}
\end{picture}
\end{center}

As explained previously, the \texttt{n} flag occurs in this and all other descriptors that are not qualifiers so that strings can be easily and unambiguously distinguished from all other kinds of values. The value in the v-word could be any constant value, but zero is useful and easily identified{---}and suggests {\textquotedbl}null.{\textquotedbl}

In the diagrams that follow, a null block-pointer is represented as

\begin{center}
\begin{picture}(200,16)
\put(0,0){\nullptrbox{}}
\end{picture}
\end{center}

\noindent Icon version 6 used descriptors in blocks to refer to other blocks (see section 4.2). Subsequent versions switched to using pointers.

\subsection{Integers}

Icon supports word-size integers that are at least 32 bits in size. Such integers therefore are typically C longs, depending on the computer architecture. As long as it fits, the value of an Icon integer is stored in the v-word of its descriptor. For example, the integer 13570 is represented by

\begin{center}
\begin{picture}(200,32)
\put(0,0){\dvbox{integer}{n}{13570}}
\end{picture}
\end{center}

Note that the \texttt{n} flag distinguishes this descriptor from a string whose first character might be at the address 13570 and whose length might have the same value as the type code for integer.

An Icon integer that fits in the v-word is stored there. An integer that is too large to fit into a word is stored in a data structure that is pointed to by the v-word, as illustrated in the next section. The two representations of integers are distinguished by different internal type codes: \texttt{integer} for integers that are contained in the v-words of their descriptors and \texttt{lrgint} for integers that are contained in blocks pointed to by the v-words of their descriptors. Thus, there are two internal types for one source-language data type.

\section{Blocks}

All other types of Icon data are represented by descriptors with v-words that point to blocks of words.
These blocks have a comparatively uniform structure that is designed to facilitate their processing during garbage collection. The first word of every block, called its \textit{title}, contains a type code. This type code is the same code that is in the type-code portion of the d-word of a descriptor that points to the block. Some blocks are fixed in size for all values of a given type. For example, on a computer with 32-bit words, the source language integer 5,000,000,000 is stored in a large integer block: \begin{picture}(300,80) \put(0,32){\dvboxptr{lrgint}{np}{60}{}} \put(140,0){\blklrgbox{lrgint}{5,000,000,000}} \put(140,17){\trboxlabel{title}} \end{picture} \noindent The \texttt{p} flag in the descriptor indicates that the v-word contains a pointer to a block. Blocks of some other types, such as record blocks, vary in size from value to value, but any one block is fixed in size and never grows or shrinks. If the type code in the title does not determine the size of the block, the second word in the block contains its size in bytes. In the diagrams that follow, the sizes of blocks are given for computers with 32-bit words. The diagrams would be slightly different for computers with 16-bit words. Records, which differ in size depending on how many fields they have, are examples of blocks that contain their sizes. For example, given the record declaration \iconline{ \>record complex(r, i) } \noindent and \iconline{ \>point := complex(1, 3) } \noindent the value of \texttt{point} is \label{ComplexRecord} \begin{picture}(300,150)(-20,0) \put(0,112){\dvboxptr{record}{np}{60}{}} \put(140,96){\blkbox{record}{32}} \put(140,96){\rightboxlabels{title}{size of block in bytes}} \put(140,80){\wordbox{\textit{id}}{}} \put(140,64){\wordboxptr{50}{record constructor}} \put(140,32){\dvbox{integer}{n}{1}} \put(140,0){\dvbox{integer}{n}{3}} \end{picture} The record-constructor block contains information that was used to resolve field references, but has now been mostly superseded -- see chapter \ref{Records-Classes} for further details. \label{Block-Id-Definition} The \textit{id} field is present in many blocks. Its purpose is to distinguish between different blocks of the same type (for example, it is used in the computation of the hash value that determines where to place a value in a table --- see section 7.3 for details). The \textit{id} field is also printed out by diagnostic routines; it is incremented for each block created. The runtime system maintains separate {\em id} counters for blocks of different types. With the declaration \iconline{ \>record term(value, code, count) } \noindent and \iconline{ \>word := term("chair", "noun", 4) } \noindent the value of word is: \begin{picture}(300,190)(-20,0) \put(0,144){\dvboxptr{record}{np}{60}{}} \put(140,128){\blkbox{record}{40}} \put(140,128){\rightboxlabels{title}{size of block}} \put(140,112){\wordbox{\textit{id}}{}} \put(140,96){\wordboxptr{50}{record-constructor}} \put(140,64){\dvboxptr{5}{}{50}{"chair"}} \put(140,32){\dvboxptr{4}{}{50}{"noun"}} \put(140,0){\dvbox{integer}{n}{4}} \end{picture} As illustrated by these examples, blocks may contain descriptors as well as non-descriptor data. Non-descriptor data comes first in the block, followed by any descriptors, as illustrated by the preceding figure. The location of the first descriptor in a block is constant for all blocks of a given type, which facilitates garbage collection. 
Block-pointers may be placed anywhere before the descriptors, but the garbage collector expects them to be contiguous and in a fixed place for all blocks of a given type. Blocks for the remaining types are described in subsequent chapters. \section{Variables} Variables are represented by descriptors, just as values are. This representation allows values and variables to be treated uniformly in terms of storage and access. Variables for identifiers point to descriptors for the corresponding values. Variables always point to descriptors for values, never to other variables. For example, if \iconline{ \> s := "hello" } \noindent then a variable for \texttt{s} has the form \begin{picture}(300,64)(-30,0) \put(0,16){\tlboxlabel{\texttt{s}}} \put(0,16){\dvboxptr{}{npv}{60}{}} \put(140,0){\dvboxptr{5}{}{50}{"hello"}} \end{picture} \noindent The v flag distinguishes descriptors for variables from descriptors for values. The values of local identifiers are kept on a stack, while the values of global and static identifiers are located at fixed places in memory. Variables that point to the values of identifiers are created by icode instructions that correspond to the use of the identifiers in the program. Some variables, such as record field references, are computed. A variable that references a value in a data structure points to the start of the data structure. The least-significant bits of the d-word for such a variable contain the offset, in \textit{words}, of the value descriptor from the top of the block in which the value is contained. The use of words, rather than bytes, allows larger offsets, which is important for computers with 16-bit words. For example, the variable \texttt{word.count} for the record given in the preceding section is \begin{picture}(300,180)(-20,0) \put(0,16){\dvboxptr{8}{npv}{40}{}} \put(150,128){\blkbox{record}{40}} \put(120,24){\line(0,1){128}} \multiput(120,24)(4,0){9}{\line(1,0){2}} \put(146,24){\vector(1,0){4}} \put(120,152){\vector(1,0){30}} \put(150,112){\wordbox{\textit{id}}{}} \put(150,96){\wordboxptr{50}{record-constructor block}} \put(150,64){\dvboxptr{5}{}{50}{"chair"}} \put(150,32){\dvboxptr{4}{}{50}{"noun"}} \put(150,0){\dvbox{integer}{n}{4}} \end{picture} Note that the variable \texttt{word.count} cannot be determined at translation time, since the type of word is not known then and different record types could have count fields in different positions. \subsection{Operations on Variables} There are two fundamentally different contexts in which a variable can be used: \textit{dereferencing} and \textit{assignment}. Suppose, as shown previously, that the value of the identifier s is the string {\textquotedbl}hello{\textquotedbl}. Then a variable descriptor that points to the value of s and the corresponding value descriptor for {\textquotedbl}hello{\textquotedbl} have the following relationship: In an expression such as \texttt{write(s)}, \texttt{s} is dereferenced by fetching the descriptor pointed to by the v-word of the variable. 
\begin{picture}(300,64)(-30,0)
\put(0,16){\tlboxlabel{\texttt{s}}}
\put(0,16){\dvboxptr{}{npv}{60}{}}
\put(140,0){\dvboxptr{5}{}{50}{"hello"}}
\end{picture}

In the case of assignment, as in

\iconline{ \>s := 13570 }

\noindent the value descriptor pointed to by the v-word of the variable descriptor is changed:

\begin{picture}(300,64)(-30,0)
\put(0,16){\tlboxlabel{\texttt{s}}}
\put(0,16){\dvboxptr{}{npv}{60}{}}
\put(140,0){\dvbox{integer}{n}{13570}}
\end{picture}

These operations on variables correspond to indirect load and store instructions of a typical computer.

\subsection{Special Variables}

Icon has several variables with special properties that complicate assignment and dereferencing. Consider, for example, the keyword \texttt{\&trace}. Its value must always be an integer. Consequently, in an assignment such as

\iconline{ \>\&trace := \textit{expr} }

\noindent the value produced by \textit{expr} must be checked to be sure that it is an integer. If it is not, an attempt is made to convert it to an integer, so that in

\iconline{ \>\&trace := "1" }

\noindent the value assigned to \texttt{\&trace} is the integer \texttt{1}, not the string \texttt{{\textquotedbl}1{\textquotedbl}}.

A naive way to handle assignment to these keywords is to check every variable during assignment to see whether it is one that requires special processing. This would place a significant computational burden on every assignment. Instead, Icon divides variables into two classes: \textit{ordinary} and \textit{trapped}. Ordinary variables point to their values as illustrated previously and require no special processing. Trapped variables, so called because their processing is {\textquotedbl}trapped,{\textquotedbl} are distinguished from ordinary variables by a \texttt{t} flag. Thus, assignment only has to check a single flag to separate the majority of variables from those that require special processing.

\subsubsection{Trapped Variables}

Icon Version 6 used the trapped-variable approach to handle assignments to keywords, substrings, and table elements. Subsequent versions of Icon (and Unicon) no longer use trapped variables for assignments to keywords: instead, keyword variables have different types from ``normal'' variables. Trapped variables are still in use, however, to deal with the special processing required for assignment to substrings and table elements --- see section 12.2.

%% A trapped-variable descriptor for a keyword points to a block that
%% contains the value of the keyword, its string name, and a pointer to a
%% C function that is called when assignment to the keyword is made. For
%% example, the trapped variable for \texttt{\&trace} is:
%% %--%\ \ \includegraphics[width=3.9543in,height=1.4398in]{ib-img/ib-img017.jpg}
%% \begin{picture}(300,120)(-20,0)
%% \put(0,80){\dvboxptr{tvkywd}{nptv}{60}{}}
%% \put(140,64){\blkboxptr{tvkywd}{50}{assignment function}}
%% \put(140,32){\dvbox{integer}{n}{0}}
%% \put(140,0){\dvboxptr{6}{}{50}{"\&trace"}}
%% \end{picture}

It is worth noting that the more conventional approach to handling the problem of assignment to keywords and other special cases is to compile special code if the special case occurs in an assignment context. It is not always possible, however, to determine the context in which a variable is used in Icon.
Assume \texttt{tb} is a table and consider a procedure of the form \begin{iconcode} \>procedure tabvar(n)\\ \> ...\\ \>return tb[n]\\ \>end \end{iconcode} The semantics of Icon dictate that the result returned in this case should be a variable, not just its value, so that it is possible to write an expression such as \iconline{ \>tabvar(5) := 10 } \noindent which has the effect of assigning the value 10 to \texttt{tb[5]}. The translator has no way of knowing that an assignment to the call \texttt{tabvar(5)} is equivalent to an assignment to \texttt{tb[5]}. In fact, the translator cannot even determine that the value of \texttt{tabvar} will be a function when the previous assignment is performed, much less that it will be the procedure given earlier. The trapped-variable mechanism provides a way to handle uniformly all such situations. \PrimaryIndexBegin{Keywords} \subsubsection{Keyword Variables} Trapped variables are especially useful when dealing with cases where a variable sometimes needs special treatment and sometimes not. But keywords {\em always} require special treatment, so they have their own types, distinct from the everyday numerical and string types. The keyword type codes are defined symbolically (values for other types are given in section 4.4.3). \begin{iconcode} \#define \>\>\>T\_Kywdint \>\>\>\>\>\>\>20 \>\>\>\>/* integer keyword */\\ \#define \>\>\>T\_Kywdpos \>\>\>\>\>\>\>21 \>\>\>\>/* keyword \&pos */\\ \#define \>\>\>T\_Kywdsubj \>\>\>\>\>\>\>22 \>\>\>\>/* keyword \&subject */\\ \#define \>\>\>T\_Kywdwin \>\>\>\>\>\>\>23 \>\>\>\>/* keyword \&window */\\ \#define \>\>\>T\_Kywdstr \>\>\>\>\>\>\>24 \>\>\>\>/* string keyword */\\ \#define \>\>\>T\_Kywdevent \>\>\>\>\>\>\>25 \>\>\>\>/* keyword \&eventsource, etc. */ \end{iconcode} There are four keyword variables that require special processing for assignment: \texttt{\&trace}, \texttt{\&random}, \texttt{\&subject}, and \texttt{\&pos}. The keyword \texttt{\&random} is treated in essentially the same way that \texttt{\&trace} is. Assignment to \texttt{\&subject} requires a string value and has the side effect of assigning the value \texttt{1} to \texttt{\&pos}. Assignment to \texttt{\&pos} is even more complicated: not only must the value assigned be an integer, but if it is not positive, it must also be converted to the positive equivalent with respect to the length of \texttt{\&subject}. In any event, if the value in the assignment to \texttt{\&pos} is not in the range of \texttt{\&subject}, the assignment fails. Dereferencing these keywords, on the other hand, requires no special processing. See section 12.2 for a further discussion of keyword variables \PrimaryIndexEnd{Keywords} \section{Descriptors and Blocks in C} Descriptors and blocks of data are described and depicted abstractly in the previous sections of this chapter. In order to understand the implementation of some aspects of Icon, it is helpful to examine the C code that actually defines and manipulates data. The following sections illustrate typical C declarations for the structures used in the implementation of Icon. Some of the terminology and operations that appear frequently in the C code are included as well. Other operations are introduced in subsequent chapters. as they are needed. \subsection{Descriptors} As mentioned in Sec. 4.1, for C compilers in which ints and pointers are the same size, the size of a word is the size of an int, while if pointers are larger than ints, the size of a word is the size of a long, or a long long. 
The difference between these models of memory is handled by typedefs under the control of conditional compilation. Two constants that characterize the sizes are defined: IntBits and WordBits. These sizes are used to select appropriate definitions for signed and unsigned words. The fact that on some 64-bit C compilers a \texttt{long} is only 32 bits, while on others it is 64 bits, complicates matters. The symbol \texttt{LongLongWord} indicates this situation. \bigskip % re-insert colors \begin{iconcode} \>\#if IntBits != WordBits\\ \>\>{\color{blue}\#ifdef LongLongWord}\\ \>\>\>{\color{blue}typedef long long int word;}\\ \>\>\>{\color{blue}typedef unsigned long long int uword;}\\ \>\>{\color{blue}\#else}\\ \>\>\>typedef long int word;\\ \>\>\>typedef unsigned long uword;\\ \>\>{\color{blue}\#endif}\\ \>\#else \>\>\>\>\>\>\> /* IntBits != WordBits */ \\ \>typedef int word;\\ \>typedef unsigned int uword;\\ \>\#endif \end{iconcode} A descriptor is declared as a structure: \begin{iconcode} \>struct descrip \{\ \ /* descriptor */\\ \>\>word dword;\ \ /*\ \ type field */\\ \>\>union \{\\ \>\>\>word integr;\ \ /*\ \ integer value */\\ % blue {\color{blue}\#ifdef DescriptorDouble}\\ \>\>\>{\color{blue}double realval;}\\ {\color{blue}\#endif}\\ \>\>\>char *sptr;\ \ /*\ \ pointer to character string */\\ \>\>\>union block *bptr;\ \ /*\ \ pointer to a block */\\ \>\>\>struct descrip *dptr;\ \ /*\ \ pointer to a descriptor */\\ \>\>\} vword;\\ \>\}; \end{iconcode} The v-word of a descriptor is a union that reflects its various uses: an integer, a pointer to a string, a pointer to a block, or a pointer to another descriptor (in the case of a variable). \subsection{Blocks} Each block type has a structure declaration. For example. the declaration for record blocks is \begin{iconcode} \>struct b\_record \{\>\>\>\>\>\>\>\>\> /* record block */\\ \>\>word title;\>\>\>\>\>\>\>\> /*\ T\_Record */\\ \>\>word blksize;\>\>\>\>\>\>\>\> /*\ \ size of block */\\ \>\> word id;\>\>\>\>\>\>\>\> /*\ \ identification number */\\ \>\>union block *recdesc;\>\>\>\>\>\>\>\> /* pointer to record constructor */\\ \>\>struct descrip fields[1];\>\>\>\>\>\>\>\> /*\ \ fields */\\ \>\}; \end{iconcode} Blocks for records vary in size, depending on the number of fields declared for the record type. The size of 1 in \iconline{ \ \ struct descrip fields[1]; } \noindent is provided to satisfy the C compiler. Actual blocks for records are constructed at run time in a region that is managed by Icon's storage allocator. Such blocks conform to the previous declaration, but the number of fields varies. The declaration provides a means of accessing portions of such blocks from C. The declaration for substring trapped-variable blocks is \begin{iconcode} struct b\_tvsubs \{\>\>\>\>\>\>\>\>/* substring trapped variable block */\\ \>word title;\>\>\>\>\>\>\> /* T\_Tvsubs */\\ \>word sslen;\>\>\>\>\>\>\> /* length of substring */\\ \>word sspos;\>\>\>\>\>\>\> /* position of substring */\\ \>struct descrip ssvar;\>\>\>\>\>\>\>/* variable that substring is from */\\ \}; \end{iconcode} Note that the title fields of \texttt{b\_record} and \texttt{b\_tvsubs} contain type codes, as indicated in previous diagrams. The second field of \texttt{b\_record} is a size as mentioned previously, but \texttt{b\_tvsubs} has no size field, since all substring trapped-variable blocks are the same size, which therefore can be determined from their type. 
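The \texttt{fields[1]} member at the end of \texttt{b\_record} is a placeholder for a trailing array of descriptors whose real length is known only when a record is created. As a rough illustration of how such a variable-sized block is laid out, the following sketch allocates and initializes a record block for \texttt{nfields} fields. It is an illustration rather than code from the Icon run-time system: the real system obtains blocks from Icon's own allocated storage region rather than from \texttt{malloc()} (declared in \texttt{<stdlib.h>}), maintains a per-type counter for the \textit{id} field, and performs error checking that is omitted here.

\begin{iconcode}
\>/* Illustration only -- not the actual Icon allocator. */\\
\>struct b\_record *alloc\_record(union block *recdesc, word nfields)\\
\>\{\\
\>\>/* fields[1] already accounts for one descriptor */\\
\>\>word size = sizeof(struct b\_record) + (nfields - 1) * sizeof(struct descrip);\\
\>\>struct b\_record *rp = malloc(size);\ \ /* stand-in for Icon's own allocator */\\
\>\>rp->title = T\_Record;\ \ /* type code, matching the d-word of the descriptor */\\
\>\>rp->blksize = size;\ \ /* size of the whole block in bytes */\\
\>\>rp->id = 0;\ \ /* the real system increments a per-type counter */\\
\>\>rp->recdesc = recdesc;\ \ /* record-constructor block */\\
\>\>return rp;\ \ /* fields[0..nfields-1] are filled in by the caller */\\
\>\}
\end{iconcode}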
The block union given in the declaration of \texttt{struct descrip} consists of a union of all block types:

\begin{iconcode}
\>union block \{\\
\>\>struct b\_real realblk;\\
\>\>struct b\_cset cset;\\
\>\>struct b\_file file;\\
\>\>struct b\_proc proc;\\
\>\>struct b\_list list;\\
\>\>struct b\_lelem lelem;\\
\>\>struct b\_table table;\\
\>\>struct b\_telem telem;\\
\>\>struct b\_set set;\\
\>\>struct b\_selem selem;\\
\>\>struct b\_record record;\\
\>\>struct b\_tvsubs tvsubs;\\
\>\>struct b\_tvtbl tvtbl;\\
\>\>struct b\_refresh refresh;\\
\>\>struct b\_coexpr coexpr;\\
\>\>struct b\_externl externl;\\
\>\>struct b\_slots slots;\\
\>\>struct b\_bignum bignumblk;\\
\>\};
\end{iconcode}

Note that there are several kinds of blocks in addition to those that correspond to source-language data types.

\subsection{Defined Constants}

The type codes are defined symbolically:

\begin{iconcode}
\#define\>\>\> T\_Null \>\>\>\>\>0\\
\#define\>\>\> T\_Integer \>\>\>\>\>1\\
\#define\>\>\> T\_Lrgint \>\>\>\>\>2\\
\#define\>\>\> T\_Real \>\>\>\>\>3\\
\#define\>\>\> T\_Cset \>\>\>\>\>4\\
\#define\>\>\> T\_File \>\>\>\>\>5\\
\#define\>\>\> T\_Proc \>\>\>\>\>6\\
\#define\>\>\> T\_Record \>\>\>\>\>7\\
\#define\>\>\> T\_List \>\>\>\>\>8\\
\#define\>\>\> T\_Lelem \>\>\>\>\>9\\
\#define\>\>\> T\_Set \>\>\>\>\>10\\
\#define\>\>\> T\_Selem \>\>\>\>\>11\\
\#define\>\>\> T\_Table \>\>\>\>\>12\\
\#define\>\>\> T\_Telem \>\>\>\>\>13\\
\#define\>\>\> T\_Tvtbl \>\>\>\>\>14\\
\#define\>\>\> T\_Slots \>\>\>\>\>15\\
\#define\>\>\> T\_Tvsubs \>\>\>\>\>16\\
\#define\>\>\> T\_Refresh \>\>\>\>\>17\\
\#define\>\>\> T\_Coexpr \>\>\>\>\>18\\
\#define\>\>\> T\_External \>\>\>\>\>19
\end{iconcode}

The type codes in diagrams are abbreviated, as indicated by previous examples.

The defined constants for d-word flags are

\begin{iconcode}
\>n\>\>F\_Nqual\\
\>p\>\>F\_Ptr\\
\>v\>\>F\_Var\\
\>t\>\>F\_Tvar
\end{iconcode}

The values of these flags depend on the word size of the computer.

The d-words of descriptors are defined in terms of flags and type codes:

\begin{iconcode}
\#define D\_Null \>\>\>\>\>\>\>\>(T\_Null | F\_Nqual)\\
\#define D\_Integer \>\>\>\>\>\>\>\>(T\_Integer | F\_Nqual)\\
\#define D\_Lrgint \>\>\>\>\>\>\>\>(T\_Lrgint | F\_Ptr | F\_Nqual)\\
\#define D\_Real \>\>\>\>\>\>\>\>(T\_Real | F\_Ptr | F\_Nqual)\\
\#define D\_Cset \>\>\>\>\>\>\>\>(T\_Cset | F\_Ptr | F\_Nqual)\\
\#define D\_File \>\>\>\>\>\>\>\>(T\_File | F\_Ptr | F\_Nqual)\\
\#define D\_Proc \>\>\>\>\>\>\>\>(T\_Proc | F\_Ptr | F\_Nqual)\\
\#define D\_List \>\>\>\>\>\>\>\>(T\_List | F\_Ptr | F\_Nqual)\\
\#define D\_Table \>\>\>\>\>\>\>\>(T\_Table | F\_Ptr | F\_Nqual)\\
\#define D\_Set \>\>\>\>\>\>\>\>(T\_Set | F\_Ptr | F\_Nqual)\\
\#define D\_Selem \>\>\>\>\>\>\>\>(T\_Selem | F\_Ptr | F\_Nqual)\\
\#define D\_Record \>\>\>\>\>\>\>\>(T\_Record | F\_Ptr | F\_Nqual)\\
\#define D\_Telem \>\>\>\>\>\>\>\>(T\_Telem | F\_Ptr | F\_Nqual)\\
\#define D\_Lelem \>\>\>\>\>\>\>\>(T\_Lelem | F\_Ptr | F\_Nqual)\\
\#define D\_Tvsubs \>\>\>\>\>\>\>\>(T\_Tvsubs | D\_Tvar)\\
\#define D\_Tvtbl \>\>\>\>\>\>\>\>(T\_Tvtbl | D\_Tvar)\\
\#define D\_Coexpr \>\>\>\>\>\>\>\>(T\_Coexpr | F\_Ptr | F\_Nqual)\\
\#define D\_Refresh \>\>\>\>\>\>\>\>(T\_Refresh | F\_Ptr | F\_Nqual)\\
\#define D\_Var \>\>\>\>\>\>\>\>(F\_Var | F\_Nqual | F\_Ptr)\\
\#define D\_Tvar \>\>\>\>\>\>\>\>(D\_Var | F\_Tvar)
\end{iconcode}

As indicated previously, flags, type codes, and d-words are distinguished by the prefixes \texttt{F\_}, \texttt{T\_}, and \texttt{D\_}, respectively.
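To see how these definitions fit together with the \texttt{struct descrip} declaration given earlier, the following fragment builds, by hand, the three kinds of descriptors diagrammed at the beginning of this chapter: the null value, a small integer, and a qualifier. It is an illustration rather than code from the run-time system, which constructs descriptors in its own way; the variable names are placeholders.

\begin{iconcode}
\>struct descrip nulldesc, intdesc, strdesc;\\
\>nulldesc.dword = D\_Null;\ \ /* null value: type code plus n flag */\\
\>nulldesc.vword.integr = 0;\\
\>intdesc.dword = D\_Integer;\ \ /* the integer 13570, held directly in the v-word */\\
\>intdesc.vword.integr = 13570;\\
\>strdesc.dword = 5;\ \ /* a qualifier: the d-word is just the length ... */\\
\>strdesc.vword.sptr = "hello";\ \ /* ... and the v-word points to the characters */
\end{iconcode}

A test such as the \texttt{Qual()} macro shown in the section on RTL coding below then needs to examine only the d-word to tell the qualifier apart from the other two descriptors.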
\subsection{RTL Coding} Since the optimizing compiler was introduced in versions 8 and 9 of Icon, the routines for the run-time system use an extended C syntax called RTL (for Run-Time Language) that encodes the type information for arguments and results. Some of these are illustrated by the RTL function for the Icon operator \texttt{*x}, which produces the size of \texttt{x}: \begin{iconcode} \>operator\{1\} * size(x)\\ \>abstract \{ return integer \}\\ \>type\_case x of \{\\ \>\>string: inline \{ return C\_integer StrLen(x); \}\\ \>\>list: inline \{ return C\_integer BlkD(x,List)->size; \}\\ \>\>table: inline \{ return C\_integer BlkD(x,Table)->size; \}\\ \>\>set: inline \{ return C\_integer BlkD(x,Set)->size; \}\\ \>\>cset: inline \{\\ \>\>\>register word i = BlkD(x,Cset)->size;\\ \>\>\>if (i < 0) i = cssize(\&x);\\ \>\>\>return C\_integer i;\\ \>\>\>\}\\ \>\>...\\ \>\>default: \{\\ \>\>\>\>/*\\ \>\>\>\>\ * Try to convert it to a string.\\ \>\>\>\>\ */\\ \>\>\>\ \ if !cnv:tmp\_string(x) then\\ \>\>\>\>\ \ runerr(112, x);\ \ /* no notion of size */\\ \>\>\>\ \ inline \{\\ \>\>\>\>\ \ return C\_integer StrLen(x);\\ \>\>\>\>\ \ \}\\ \>\>\>\ \ \}\\ \>\>\}\\ end \end{iconcode} \texttt{operator} is an RTL construct that performs several operations. One of these operations is to provide a C function declaration. Since the function is called by the interpreter, the header is somewhat different from what it would be if \texttt{size} were called directly. The details are described in Chapter 8. The arguments of the Icon operation are referred to via named descriptors, such as \texttt{x}. The result that is produced is also a descriptor. RTL extends C's \texttt{return} statement to include type information, with which the d-word of the return value is set to \texttt{D\_lnteger}, since the returned value is a \texttt{C\_integer}. Next, the \texttt{type\_case} selects different branches of code depending on the type of x. In the generated code there is a test to determine if descriptor \texttt{x} holds a qualifier. \texttt{Qual()} is a macro that is defined as \iconline{ \>\#define Qual(d)\ \ (!((d).dword \& F\_Nqual)) } If \texttt{x} is a qualifier, its length is placed in the v-word of the return value descriptor, using the macros IntVal and StrLen, which are defined as \begin{iconcode} \>\#define IntVal(d)\ \ ((d).vword.integr)\\ \>\#define StrLen(d)\ \ ((d).dword) \end{iconcode} If \texttt{x} is not a qualifier, then the size depends on the type. The macro \texttt{Type()} isolates the type code \iconline{ \>\#define Type(d)\ \ ((d).dword \& TypeMask) } \noindent where the value of \texttt{TypeMask} is 63, providing considerable room for additions to Icon's internal types. For most Icon types that are represented by blocks, their source-language size is contained in their \texttt{size} field. The macro \texttt{BlkLoc()} accesses a pointer in the v-field of a descriptor and is defined as \iconline{ \>\#define BlkLoc(d) ((d).vword.bptr) } A more specialized macro \texttt{BlkD()} wraps uses of \texttt{BlkLoc()} and subsequent union member access, allowing descriptor-block consistency to be verified at run-time if desired. If the type is not one of those given, the final task is an attempt to convert \texttt{x} to a string. The RTL expression \texttt{cnv:tmp\_string()} does this, using local temporary buffer. The value of \texttt{x} is changed accordingly. A fixed-sized buffer can be used, since there is a limit to the size of a string that can be obtained by converting other types. 
This limit is 256, which is reached only for conversion of \texttt{\&cset}. The conversion may fail, as for \texttt{*\&null}, which is signaled by the return value 0 from \texttt{cnv:tmp\_string()}. In this case, program execution is terminated with a run-time error message, using \texttt{runerr()}. If the conversion is successful, the size is placed in the v-word of the result, as is the case if \texttt{x} was a qualifier originally.

\textsc{Retrospective}: Descriptors provide a uniform way of representing Icon values and variables. Since descriptors for all types of data are the same size, there are no problems with assigning different types of values to a variable{---}they all fit.

The importance of strings is reflected in the separation of descriptors into two classes{---}qualifiers and nonqualifiers{---}by the \texttt{n} flag. The advantages of the qualifier representation for strings are discussed in Chapter 5.

It is comparatively easy to add a new type to Icon. A new type code is needed to distinguish it from other types. If the possible values of the new type are small enough to fit into the v-word, as is the case for integers, no other data is needed. For example, the value of a character data type could be contained in its descriptor. For types that have values that are too large to fit into a v-word, pointers to blocks containing the data are placed in the v-words instead. Lists, sets, and tables are examples of data types that are represented this way. See Chapters 6 and 7.

\bigskip

\noindent\textbf{EXERCISES}

\noindent\textbf{\ref*{VV-Chapter}.1} Give examples of Icon programs in which heterogeneous aggregates are used in significant ways.

\noindent\textbf{\ref*{VV-Chapter}.2} Design a system of type declarations for Icon so that the translator could do type checking. Give special consideration to aggregates, especially those that may change in size during program execution. Do this from two perspectives: (a) changing the semantics of Icon as little as possible, and (b) maximizing the type checking that can be done by the translator at the expense of flexibility in programming.

\noindent\textbf{\ref*{VV-Chapter}.3} Suppose that functions in Icon were not first-class values and that their meanings were bound at translation time. How much could the translator do in the way of error checking?

\noindent\textbf{\ref*{VV-Chapter}.4} Compile a list of all Icon functions and operators. Are there any that do not require argument type checking? Are there any that require type checking but not conversion? Identify those that are polymorphic. For the polymorphic ones, identify the different kinds of computations that are performed depending on the types of the arguments.

\noindent\textbf{\ref*{VV-Chapter}.5} Compose a table of all type checks and conversions that are required for Icon functions and operators.

\noindent\textbf{\ref*{VV-Chapter}.6} To what extent would the implementation of Icon be simplified if automatic type conversion were not supported? How would this affect the programmer?

\noindent\textbf{\ref*{VV-Chapter}.7} Why is it desirable for string qualifiers not to have flags and for all other kinds of descriptors to have flags indicating they are not qualifiers, rather than the other way around?

\noindent\textbf{\ref*{VV-Chapter}.8} Is the \texttt{n} flag that distinguishes string qualifiers from all other descriptors really necessary? If not, explain how to distinguish the different types of descriptors without this flag.
\noindent\textbf{\ref*{VV-Chapter}.9} On computers with extremely limited address space, two-word descriptors may be impractically large. Describe how one-word descriptors might be designed, discuss how various types might be represented, and describe the ramifications for storage utilization and execution speed. \noindent\textbf{\ref*{VV-Chapter}.10} Identify the diagrams in this chapter that would be different if they were drawn for a computer with 16-bit words. Indicate the differences. \noindent\textbf{\ref*{VV-Chapter}.11} There is nothing in the nature of keywords that requires them to be processed in a special way for assignment but not for dereferencing. Invent a new keyword that is a variable that requires processing when it is dereferenced. Show how to generalize the keyword trapped-variable mechanism to handle such cases. \noindent\textbf{\ref*{VV-Chapter}.12} List all the syntactically distinct cases in which the translator can determine whether a keyword variable is used in an assignment or dereferencing context. \noindent\textbf{\ref*{VV-Chapter}.13} What would be gained if special code were compiled for those cases in which the context for keyword variables could be determined?
{ "alphanum_fraction": 0.7294879939, "avg_line_length": 40.2814738997, "ext": "tex", "hexsha": "e20e4a9b5a65eb98efecbb7d506c94b8d0677347", "lang": "TeX", "max_forks_count": 16, "max_forks_repo_forks_event_max_datetime": "2022-03-01T06:01:00.000Z", "max_forks_repo_forks_event_min_datetime": "2019-10-14T04:32:36.000Z", "max_forks_repo_head_hexsha": "df79234dc1b8a4972f3908f601329591c06bd141", "max_forks_repo_licenses": [ "BSD-2-Clause" ], "max_forks_repo_name": "jschnet/unicon", "max_forks_repo_path": "doc/ib/p1-values.tex", "max_issues_count": 83, "max_issues_repo_head_hexsha": "29f68fb05ae1ca33050adf1bd6890d03c6ff26ad", "max_issues_repo_issues_event_max_datetime": "2022-03-22T11:32:35.000Z", "max_issues_repo_issues_event_min_datetime": "2019-11-03T20:07:12.000Z", "max_issues_repo_licenses": [ "BSD-2-Clause" ], "max_issues_repo_name": "MatthewCLane/unicon", "max_issues_repo_path": "doc/ib/p1-values.tex", "max_line_length": 86, "max_stars_count": 35, "max_stars_repo_head_hexsha": "29f68fb05ae1ca33050adf1bd6890d03c6ff26ad", "max_stars_repo_licenses": [ "BSD-2-Clause" ], "max_stars_repo_name": "MatthewCLane/unicon", "max_stars_repo_path": "doc/ib/p1-values.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-01T06:00:40.000Z", "max_stars_repo_stars_event_min_datetime": "2019-11-29T13:19:55.000Z", "num_tokens": 10858, "size": 39355 }
%\def\draft{1} \documentclass[11pt]{article} \usepackage[reqno]{amsmath} \usepackage{natbib,amssymb,amsthm,graphicx,verbatim,url,verbatim} \usepackage{color} \usepackage{url} \usepackage{rotating} \usepackage{setspace} \usepackage[lofdepth,lotdepth]{subfig} \usepackage[top=1in, right=1in, left=1in, bottom=1.6in]{geometry} \usepackage{tikz} \usepackage{color}\definecolor{spot}{rgb}{0.6,0,0} \usepackage[pdftex, bookmarksopen=true, bookmarksnumbered=true, pdfstartview=FitH, breaklinks=true, urlbordercolor={0 1 0}, citebordercolor={0 0 1}, colorlinks=true, citecolor=spot, linkcolor=spot, urlcolor=spot, pdfauthor={Michael Shoemate}, pdftitle={Title}]{hyperref} \usetikzlibrary {positioning} \begin{document} \title{Privacy System Design Proposal} \author{Michael Shoemate} \maketitle \begin{abstract} A privacy preserving system design is proposed, containing abstractions for a statistical analysis and differentially private release. The statistical analysis is a computational graph and privacy definition. The system has three layers, one each for analysis construction, validation, and execution. Parsers and bindings may be written for analysis construction, and runtimes for analysis execution. Analysis verification is centralized in a core C++ library. The primary runtime is written in C++. \end{abstract} \section{Overview} \paragraph{Goals} The goal of this system is to provide a flexible framework on which \begin{itemize} \item Researchers can contribute new algorithms flexible enough to encompass a broad range of topics and approaches to differential privacy, \item conceptually organized enough that peer researchers and/or automated validation systems can more easily review contributed work \item and which allows both Analysts using the library and System Builders creating applications to make use of vetted differentially private algorithms with a low bar with languages native to their workflow. \end{itemize} % \paragraph{Components} % The library consists of: % \begin{itemize} % \item \textbf{components} Data in, data out. Individual nodes in a computational graph. % \item \textbf{operators} Components representing data manipulations. % \item \textbf{statistics} Components with data input, and releasable data output. % \item \textbf{mechanisms} Nondeterministic components. % \end{itemize} %\paragraph{Requirements} \section{Abstractions} \subsection{Analysis} To allow for pre- and post-processing, nested composition and code modularity, an analysis is a computational graph of instances of components. Each component in the analysis graph is either an operator or statistic. An operator may be a transformation, subset or join. The analysis conforms to either a JSON or protobuf schema. No data is stored in the analysis, it is only stored in the release. \subsection{Release} The second primary abstraction is the release. The release is a set of values that are associated with nodes in the accompanying analysis. Entries in a release include the initial variable bounds, the record count and estimated statistics. The types of released statistics may be numeric, string, or even function-valued, depending on the mechanism. Relevant information for a released statistic include the corresponding node id, value, batch, whether the release came from the user or from the runtime, and whether the value is privatized. \\ In some situations it can be useful to evaluate a computational graph to release private data. 
Three such examples are for evaluating loss functions, filters, and executing graphs across multiple runtimes. \section{Components} A list of components is available at \href{https://bit.ly/privacy_components}{https://bit.ly/privacy\_components}. This list is still in-progress. \subsection{Operators} Operators, or manipulations, include transformations, subsets, aggregations and joins. Analyses using manipulations are validated using Lipschitz constants in a "stability" framework. Manipulation primitives may also be reused to define arbitrary objective functions for optimization, for filters, or to describe a differentially private release of a function. \subsection{Mechanisms} Mechanisms are building blocks used by statistics, and if placed in an analysis graph, are not capable of privatizing data on their own. Example mechanisms include the laplace mechanism and exponential mechanism. \subsection{Statistics} A statistic contains several components. For example, a "Mean" may be composed of "Sum" and "Divide" components, and a "Differentially Private Mean" composed of "Impute", "Clip", "Mean" and "Laplace" components. A statistic is the only kind of component that can privatize data. \subsection{Miscellaneous} A "Constant" component propagates a value included from a release. A "Literal" component propagates a value included in its constructor. These components are easily hidden from the user. \section{System Layers} \subsection{Analysis Construction} An analysis consists of a computational graph and a privacy definition. The computational graph may be constructed via a graphical interface, manually via language bindings, or automatically via a parser. Any such tools emit an analysis and optionally a partial release. The privacy definition is simply a string label. \subsection{Analysis Validation} The C++ validator has two tasks- checking the graph and computing meta-statistics. \\ The validator ensures every path through an analysis graph passes through a privatizing statistic, and that the graph is executable (static type checking). Note that mechanisms are not capable of privatizing data alone; released data must pass through curated plans that include clipping, imputation, and have sensible aggregation/mechanism pairings. In addition to these checks, all primitives in an analysis must support the same privacy definition. For example, resampling may not be supported by concentrated privacy. Optionally checks can be made to ensure N is not being manipulated by the user through the computational graph, by enforcing the the child of N parameters to be of type "Constant". \\ The validator may also compute meta-statistics (accuracy, disclosure risk measures, confidences) from an analysis and partial release, as well as overall epsilon after applying known composition theorems. \subsection{Analysis Execution} The execution layer is the only layer with data access. The execution layer takes an analysis and optionally a partial release, and emits a release. Including the prior release will make sources of randomness across batches deterministic. Protection against timing attacks may be implemented in the execution layer. \\ An implementation of an execution layer is a runtime. A runtime is deployed remotely at the location of the data. Due to the language-agnostic representation of the analysis and release, a runtime may be written in any language, with any framework. The aim for this project is to provide one standard runtime in C++. 
It still remains possible to support components/algorithms that are only available in, say, Python, via a separate python runtime. In this case, the execution of an analysis/release can take multiple steps, where the analysis is partially evaluated, a release is serialized (including private data), and then passed between runtimes to continue execution. % \begin {center} % \begin {tikzpicture}[-latex ,auto ,node distance =1 cm and 5cm ,on grid , % semithick , state/.style={ draw, minimum width =2 cm}] % \node[state] (Bindings-Python) {$Python Bindings$}; % \node[state] (Bindings-R) [below=of Bindings-Python] {$R Bindings$}; % \node[state] (Validator) [right above=of Bindings-Python] {$Validator$}; % \node[state] (Runtime-Eigen) [below=of Validator] {$Eigen Runtime$}; % \node[state] (Runtime-SQL) [right=of Runtime-Eigen] {$SQL Runtime$}; % \end{tikzpicture} % \end{center} \section{Examples} \textbf{\textit{The JSON samples used in the examples are simplified.}} \subsection{Mean} To compute a mean, an analyst would first construct an analysis and a partial release. \\ 1. a partial release: \begin{verbatim} {"N": 50, "Minimum": 23, "Maximum": 45} \end{verbatim} 2. an analysis: \begin {center} \begin {tikzpicture}[-latex ,auto ,node distance =1 cm and 5cm ,on grid , semithick , state/.style ={ draw, minimum width =2 cm}] \node[state] (DataSource) {$DataSource$}; \node[state] (N) [below=of DataSource] {$Constant (N)$}; \node[state] (Minimum) [below=of N] {$Constant (Minimum)$}; \node[state] (Maximum) [below=of Minimum] {$Constant (Maximum)$}; \node[state] (DPMean) [right=of N] {$DPMean$}; \path (DataSource) edge (DPMean); \path (N) edge (DPMean); \path (Minimum) edge (DPMean); \path (Maximum) edge (DPMean); \end{tikzpicture} \end{center} The system passes the analysis and release to the validator and gets accuracy estimates. \begin{verbatim} {"DPMean": 2.3} \end{verbatim} The researcher then submits the analysis to a runtime for a new release: \begin{verbatim} {"N": 500, "Minimum": 23, "Maximum": 45, "DPMean": 30.2} \end{verbatim} \subsection{Mean with Nesting} If the record count is private and the researcher is unable to provide an estimate, then an interactive analysis may be useful. The following metadata is now used. \\ 1. a partial release: \begin{verbatim} {} \end{verbatim} 2. an analysis: \begin {center} \begin {tikzpicture}[-latex ,auto ,node distance =1 cm and 5cm ,on grid , semithick , state/.style ={ draw, minimum width =2 cm}] \node[state] (NoisyCount) {$NoisyCount$}; \node[state] (DataSource) [left=of NoisyCount] {$DataSource$}; \path (DataSource) edge (NoisyCount); \end{tikzpicture} \end{center} The system passes the analysis and release to the validator for accuracy estimates. \begin{verbatim} {"NoisyCount": 8} \end{verbatim} The researcher submits the analysis to a runtime for a release. \begin{verbatim} {"NoisyCount": 502} \end{verbatim} The researcher may now interactively extend the analysis with the mean using the estimated record count. 
\begin {center} \begin {tikzpicture}[-latex ,auto ,node distance =1 cm and 5cm ,on grid , semithick , state/.style ={ draw, minimum width =2 cm}] \node[state] (DataSource) {$DataSource$}; \node[state] (NoisyCount) [right=of DataSource] {$NoisyCount$}; \node[state] (Minimum) [below=of NoisyCount] {$Constant (Minimum)$}; \node[state] (Maximum) [below=of Minimum] {$Constant (Maximum)$}; \node[state] (DPMean) [right=of Minimum] {$DPMean$}; \path (DataSource) edge (NoisyCount); \path (DataSource) edge (DPMean); \path (NoisyCount) edge (DPMean); \path (Minimum) edge (DPMean); \path (Maximum) edge (DPMean); \end{tikzpicture} \end{center} The system passes the analysis and release to the validator for accuracy estimates. \begin{verbatim} {"NoisyCount": 8, "DPMean": 2.3} \end{verbatim} The researcher submits the analysis to a runtime for a release. \begin{verbatim} { "N": 50, "Minimum": 23, "Maximum": 45, "NoisyCount": 502, "DPMean": 30.2 } \end{verbatim} \subsection{Mean with Transformation} A new analysis is started, with a separate privacy budget. Given the nature of privacy, a computational graph with released statistics is immutable- however, a graph may be extended interactively. \\ A transformation may be applied before or after statistics. Transformations modify meta-statistics released by the validator, like accuracy and confidence. Preprocess changes to these meta-statistics are tracked via accumulated stability penalties. The following metadata is now used. \\ 1. a partial release: \begin{verbatim} {"Offset": 45, "Minimum": 68, "Maximum": 90} \end{verbatim} 2. an analysis: \begin {center} \begin {tikzpicture}[-latex ,auto ,node distance =1 cm and 4cm ,on grid , semithick , state/.style ={ draw, minimum width =2 cm}] \node[state] (DataSource) {$DataSource$}; \node[state] (Constant) [below=of DataSource] {$Constant (Offset)$}; \node[state] (Add) [right=of DataSource] {$Add$}; \node[state] (NoisyCount) [right=of Add] {$NoisyCount$}; \node[state] (Minimum) [below=of NoisyCount] {$Constant (Minimum)$}; \node[state] (Maximum) [below=of Minimum] {$Constant (Maximum)$}; \node[state] (DPMean) [right =of Minimum] {$DPMean$}; \path (DataSource) edge (Add); \path (Constant) edge (Add); \path (Add) edge (NoisyCount); \path (Add) edge (DPMean); \path (NoisyCount) edge (DPMean); \path (Minimum) edge (DPMean); \path (Maximum) edge (DPMean); \end{tikzpicture} \end{center} The system passes the analysis and release to the validator for accuracy estimates. \begin{verbatim} {"NoisyCount": 8} \end{verbatim} The researcher submits the analysis to a runtime and receives an updated release. \begin{verbatim} { "Offset": 45, "Minimum": 68, "Maximum": 90, "NoisyCount": 498, "DPMean": 74.7 } \end{verbatim} In this situation, the system was unable to provide accuracy estimates for the DPMean statistic, due to missing dependencies, but was able to release an estimate from the runtime. If accuracy estimates are desired, the release may be passed to the validator to retrieve the DPMean node's accuracy estimates. \begin{verbatim} {"NoisyCount": 8, "DPMean": 2.7} \end{verbatim} One might expect the accuracy estimates for the DPMean node to be the same as the estimates in the previous example, because Add is a stability-1 transformation. However, this is a separate analysis from the previous example, and accuracy is dependent on the random quantity released from the NoisyCount node. \section{Conclusion} Much of the verbosity of the system can be hidden behind language bindings. 
The bindings can automatically initialize constant nodes, the interfaces between the validator and runtimes can be unified, and languages that support operator overloading can build up function graphs implicitly. The flexibility of the core architecture permits language bindings with seamless construction of verifiable and secure statistical analyses. The proposal outlines a language-agnostic centralized differential privacy library. The analysis abstraction permits bindings, parsers and runtimes in any language and on any platform, as the need arises. By using modular components, security-critical code is isolated. A single C++ runtime will allow bindings from any language to compute differentially private analyses. At the same time, potential runtimes in SQL, Spark, Dask, Pandas or C++, may implement a subset or all available primitives as desired. \end{document}
{ "alphanum_fraction": 0.7688552767, "avg_line_length": 58.8866396761, "ext": "tex", "hexsha": "05e1c5fd8000d9025316d29d7689d5cdcf32d371", "lang": "TeX", "max_forks_count": 6, "max_forks_repo_forks_event_max_datetime": "2020-12-08T15:57:02.000Z", "max_forks_repo_forks_event_min_datetime": "2020-10-22T13:29:54.000Z", "max_forks_repo_head_hexsha": "74f7cc7cce7f22c7f39b455ed7db99e04b328001", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "amanjeev/smartnoise-core", "max_forks_repo_path": "whitepapers/system_architecture/system-architecture.tex", "max_issues_count": 34, "max_issues_repo_head_hexsha": "74f7cc7cce7f22c7f39b455ed7db99e04b328001", "max_issues_repo_issues_event_max_datetime": "2021-01-11T13:44:20.000Z", "max_issues_repo_issues_event_min_datetime": "2020-10-22T13:56:57.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "amanjeev/smartnoise-core", "max_issues_repo_path": "whitepapers/system_architecture/system-architecture.tex", "max_line_length": 705, "max_stars_count": 53, "max_stars_repo_head_hexsha": "74f7cc7cce7f22c7f39b455ed7db99e04b328001", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "amanjeev/smartnoise-core", "max_stars_repo_path": "whitepapers/system_architecture/system-architecture.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-29T22:10:13.000Z", "max_stars_repo_stars_event_min_datetime": "2021-02-18T07:02:53.000Z", "num_tokens": 3488, "size": 14545 }
%!TEX TS-program = xelatex \documentclass[a4paper, svgnames, 12pt]{article} % -- Imports ------------------------------------------------------------------ % ============ % = Packages = % ============ \IfFileExists{References.bib}{ \usepackage[style=alphabetic, backend=biber]{biblatex} \addbibresource{References.bib}}{} \usepackage{bchart} \usepackage{enumitem} \usepackage{float} \usepackage{framed} \usepackage{minted} \usepackage{polyglossia} \usepackage[automark, nouppercase]{scrpage2} \usepackage{tcolorbox} \usepackage{titlesec} \usepackage{unicode-math} \usepackage{xcolor} \usepackage{xelatexemoji} \usepackage{xltxtra} \usepackage{hyperref} % ============== % = Attributes = % ============== \newcommand{\Title}{Useful YAML Features for Elektra} \newcommand{\TitleDescription}{Discussion Results} \newcommand{\Subject}{Results of a questionnaire about YAML features} \newcommand{\KeyWords}{YAML, Elektra} \newcommand{\LeftFooter}{\Title~—~\TitleDescription} \newcommand{\Author}{René Schwaiger} \newcommand{\Mail}{\href{mailto:[email protected]}{[email protected]}} % ============ % = Settings = % ============ % Boxes \newtcbox{\codebox}{ on line, arc = 2pt, colback = gray!10!white, boxsep = 2pt, left = 1pt, right = 1pt, top = 1pt, bottom = 1pt, boxrule = 0pt, bottomrule = 0pt, toprule = 0pt } \newtcolorbox{code-boxed}{ on line, arc=2pt, colback=gray!10!white, boxsep=2pt, left=1pt, right=1pt, top=1pt, bottom=1pt, boxrule=0pt, bottomrule=0pt, toprule=0pt } % Code \usemintedstyle{rainbow_dash} \BeforeBeginEnvironment{minted}{\begin{code-boxed}} \AfterEndEnvironment{minted}{\end{code-boxed}} % Colors \definecolor{Aqua}{rgb}{0, 0.56, 1} % Document Properties \hypersetup{pdftitle=\Title, pdfsubject=\Subject, pdfauthor=\Author, pdfkeywords=\KeyWords, colorlinks=true, linkcolor=black, anchorcolor=black, citecolor=gray, urlcolor=orange} \setlength\parindent{0cm} % Fonts \setmainfont{Avenir} \setsansfont{Ubuntu} \setmonofont{Ubuntu Mono} \newfontfamily\TitleFont{Ubuntu Condensed} % Headers & Footers \renewcommand{\headfont}{\normalfont} \setlength{\headheight}{1.8\baselineskip} \setheadsepline{0.5pt} \setfootsepline{0.5pt} \ihead{\headmark} \ohead{} \ifoot{\LeftFooter} \ofoot{\thepage} \pagestyle{scrheadings} % Language \setmainlanguage{english} \setotherlanguage{german} % Section & Paragraph Style \titleformat{\section}{\large\sffamily\bfseries}{}{0pt}{\thesection~·~} [{\color{Aqua}\hrule}] % ========== % = Macros = % ========== \newcommand{\code}[1]{ \codebox{\mintinline{yaml}|#1|} } % ============ % = Document = % ============ \begin{document} \begin{titlepage} \begin{center} {\Huge\TitleFont\Title} \vskip 0.5cm {\color{Aqua}\hrule} \vskip 0.5cm {\Large\TitleDescription} \vskip 14cm \end{center} \vfill \begin{leftbar} \begin{tabular}{ll} \textbf{Author} & \Author\\ \textbf{Mail} & \Mail\\ \textbf{Date} & \today \end{tabular} \end{leftbar} \end{titlepage} \setcounter{page}{2} % ======= % = TOC = % ======= { \titleformat{\section}{\sffamily\bfseries}{}{0pt}{}[{\color{Aqua}\hrule}] \tableofcontents } % ======== % = Text = % ======== \section{Introduction} The following document describes the result of a discussion about the serialization format \href{http://www.yaml.org}{YAML}. \section{Procedure} In the discussion 9 participants answered questions about the usefulness of certain YAML features for \href{https://www.libelektra.org}{Elektra}, a cross-plattform configuration library. Since YAML is not trivial, we introduced the format in a presentation. 
After we talked about a certain part of YAML, we answered questions the participants had about the information presented so far. Afterwards we asked the participants to fill in parts of a questionnaire about the newly introduced feature set. The questionnaire consisted of a checkbox for each feature. A checked box means that the participant considers the feature useful for Elektra, while a checkbox without a mark means the opposite. \subsection{Participants} All of the 9 participants were at least partially familiar with Elektra. Some also had previous experience with YAML. Seven of them listened to the presentation, while one participant was late and another one participated via eMail. The eMail participant received a copy of the presentation slides and the questionnaire. \section{Results} In the following bar charts the term “Yes” refers to a checked box for the specific feature. The term “?” means that the participant did not know enough about a part of YAML and therefore marked the checkbox for one feature, or the heading for multiple features, with a question mark. The value before the term “No” specifies the number of unchecked boxes minus the number of boxes marked with “?”. \subsection{Scalars} \subsubsection{Flow Scalars} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=3, value=Yes, color=orange]{3} \bcbar[text=6, value=No, color=Aqua]{6} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble]{yaml} Plain String \end{minted} \end{minipage} \caption{Plain Flow Scalar} \end{figure} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=2, value=Yes, color=orange]{2} \bcbar[text=7, value=No, color=Aqua]{7} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble]{yaml} 'Single Quoted ''String''' \end{minted} \end{minipage} \caption{Single Quoted Flow Scalar} \end{figure} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=8, value=Yes, color=orange]{8} \bcbar[text=1, value=No, color=Aqua]{1} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble]{yaml} "Double\n Quoted\n \"String\"" \end{minted} \end{minipage} \caption{Double Quoted Flow Scalar} \end{figure} \subsubsection{Block Scalars} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=2, value=Yes, color=orange]{2} \bcbar[text=6, value=No, color=Aqua]{6} \bcbar[text=1, value=?, color=DarkTurquoise]{1} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble]{yaml} > # "Folded Style" Folded Style \end{minted} \end{minipage} \caption{Folded Block Scalar} \end{figure} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=2, value=Yes, color=orange]{2} \bcbar[text=6, value=No, color=Aqua]{6} \bcbar[text=1, value=?, color=DarkTurquoise]{1} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble]{yaml} | # "Literal\nStyle" Literal Style \end{minted} 
\end{minipage} \caption{Literal Block Scalar} \end{figure} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=1, value=Yes, color=orange]{1} \bcbar[text=7, value=No, color=Aqua]{7} \bcbar[text=1, value=?, color=DarkTurquoise]{1} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble, showspaces, spacecolor = lightgray, space=·]{yaml} >1 # " 1 Space Indentation" 1 Space Indentation \end{minted} \end{minipage} \caption{Indentation Header} \end{figure} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=0, value=$\quad$Yes, color=orange]{0} \bcbar[text=8, value=No, color=Aqua]{8} \bcbar[text=1, value=?, color=DarkTurquoise]{1} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble, showspaces, spacecolor = lightgray, space=·]{yaml} >- # "No Trailing Whitespace" No Trailing Whitespace 󠀠 󠀠 \end{minted} \end{minipage} \caption{Chomping Header} \end{figure} \subsection{Lists} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=5, value=Yes, color=orange]{5} \bcbar[text=4, value=No, color=Aqua]{4} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble]{yaml} [🍎, 🍊, [Sugar, Eggs, Chocolate] ] \end{minted} \end{minipage} \caption{Flow Style} \end{figure} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=7, value=Yes, color=orange]{7} \bcbar[text=2, value=No, color=Aqua]{2} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble]{yaml} - 🍎 - 🍊 - - Sugar - Eggs - Chocolate \end{minted} \end{minipage} \caption{Block Style} \end{figure} \subsection{Mappings} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=5, value=Yes, color=orange]{5} \bcbar[text=4, value=No, color=Aqua]{4} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble]{yaml} { Austria: Vienna, South Africa: { Executive: Pretoria, Judicial: Bloemfontein, Legislative: Cape Town} } \end{minted} \end{minipage} \caption{Flow Style} \end{figure} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=7, value=Yes, color=orange]{7} \bcbar[text=2, value=No, color=Aqua]{2} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble]{yaml} Austria: Vienna South Africa: Executive: Pretoria Judicial: Bloemfontein Legislative: Cape Town \end{minted} \end{minipage} \caption{Block Style} \end{figure} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=0, value=$\quad$Yes, color=orange]{0} \bcbar[text=9, value=No, color=Aqua]{9} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble]{yaml} ? 
- { 'pretty': complex key } - - 😱 - Still part of the key : value \end{minted} \end{minipage} \caption{Support for Complex Keys} \end{figure} \subsection{Multiple Documents} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=0, value=$\quad$Yes, color=orange]{0} \bcbar[text=9, value=No, color=Aqua]{9} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble]{yaml} "Hello First Document" ... 'Second Document' ... Third Document \end{minted} \end{minipage} \caption{Support Streams} \end{figure} \subsection{Types} \subsubsection{Directives} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=1, value=Yes, color=orange]{1} \bcbar[text=7, value=No, color=Aqua]{7} \bcbar[text=1, value=?, color=DarkTurquoise]{1} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble]{yaml} %YAML 1.2 \end{minted} \end{minipage} \caption{YAML Version} \end{figure} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=3, value=Yes, color=orange]{3} \bcbar[text=5, value=No, color=Aqua]{5} \bcbar[text=1, value=?, color=DarkTurquoise]{1} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble]{yaml} %TAG ! tag:yaml.org,2002: %TAG !! tag:yaml.org,2002: %TAG !name! tag:yaml.org,2002: --- \end{minted} \end{minipage} \caption{Tag Handle Definition} \end{figure} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=2, value=Yes, color=orange]{2} \bcbar[text=6, value=No, color=Aqua]{6} \bcbar[text=1, value=?, color=DarkTurquoise]{1} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble]{yaml} %TAG !name! 
tag:yaml.org,2002: --- !name!str 6 # "6" \end{minted} \end{minipage} \caption{Named Tag Handle} \end{figure} \subsubsection{Tags} \paragraph{Tag Shorthands}~\\ \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=4, value=Yes, color=orange]{4} \bcbar[text=4, value=No, color=Aqua]{4} \bcbar[text=1, value=?, color=DarkTurquoise]{1} \bcxlabel{} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble]{yaml} !suffix value \end{minted} \end{minipage} \caption{Primary Tag Handle} \end{figure} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=3, value=Yes, color=orange]{3} \bcbar[text=5, value=No, color=Aqua]{5} \bcbar[text=1, value=?, color=DarkTurquoise]{1} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble]{yaml} !!suffix value \end{minted} \end{minipage} \caption{Secondary Tag Handle} \end{figure} \begin{figure}[H] \paragraph{Verbatim Tags}~\\ \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=0, value=$\quad$Yes, color=orange]{0} \bcbar[text=8, value=No, color=Aqua]{8} \bcbar[text=1, value=?, color=DarkTurquoise]{1} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble]{yaml} !<!ruby/object:Set> value \end{minted} \end{minipage} \caption{Local Verbatim Tags} \end{figure} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=0, value=$\quad$Yes, color=orange]{0} \bcbar[text=8, value=No, color=Aqua]{8} \bcbar[text=1, value=?, color=DarkTurquoise]{1} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble]{yaml} !<tag:yaml.org,2002:str> value \end{minted} \end{minipage} \caption{Global Verbatim Tags} \end{figure} \paragraph{Other Tags}~\\ \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=0, value=$\quad$Yes, color=orange]{0} \bcbar[text=8, value=No, color=Aqua]{8} \bcbar[text=1, value=?, color=DarkTurquoise]{1} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble]{yaml} ! value \end{minted} \end{minipage} \caption{Non-Specific Tag} \end{figure} \subsubsection{Schemas} \textbf{Remark:} One participant checked the box for the core schema without ticking the boxes for the failsafe and JSON schema. Since the core schema is an extended superset of the other two schemas, we counted the participants answers as a “Yes” vote for the failsafe and JSON schema. 
\begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=5, value=Yes, color=orange]{5} \bcbar[text=3, value=No, color=Aqua]{3} \bcbar[text=1, value=?, color=DarkTurquoise]{1} \bcxlabel{} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \begin{itemize} \item String \item Sequence \item Map \end{itemize} \end{minipage} \caption{Failsafe Schema} \end{figure} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=5, value=Yes, color=orange]{5} \bcbar[text=3, value=No, color=Aqua]{3} \bcbar[text=1, value=?, color=DarkTurquoise]{1} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} Failsafe Schema + JSON Types: \begin{itemize} \item Null \item Boolean \item Integer \item Float \end{itemize} \end{minipage} \caption{JSON Schema} \end{figure} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=3, value=Yes, color=orange]{3} \bcbar[text=5, value=No, color=Aqua]{5} \bcbar[text=1, value=?, color=DarkTurquoise]{1} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} JSON Schema + \begin{itemize} \item Octal/Hex: \code{0o123}, \code{0xfefe} \item Multiple Notations for same value: \code{null}, \code{Null}, \code{~} \end{itemize} \end{minipage} \caption{Core Schema} \end{figure} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=3, value=Yes, color=orange]{3} \bcbar[text=5, value=No, color=Aqua]{5} \bcbar[text=1, value=?, color=DarkTurquoise]{1} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{itemize} \item Ordered Map \item Set \item Binary \item Time \item … \end{itemize} \end{minipage} \caption{Additional Types} \end{figure} \paragraph{Which Additional Types:} \begin{itemize} \item “” (No answer) \item “binary” \item “date (but implemented in plugins)” \end{itemize} \subsection{References} \begin{figure}[H] \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{bchart}[max=9, width=0.85\textwidth] \bcbar[text=7, value=Yes, color=orange]{7} \bcbar[text=2, value=No, color=Aqua]{2} \end{bchart} \end{minipage} \begin{minipage}[t]{0pt}~\end{minipage} \begin{minipage}[t]{0.48\textwidth} \vspace{0pt} \begin{minted}[autogobble]{yaml} flowers: &flowers 🌳🌸🌼 garden: - *flowers # 🌳🌸🌼 - *flowers # 🌳🌸🌼 \end{minted} \end{minipage} \caption{Support Anchors \& Aliases} \end{figure} % ================ % = Bibliography = % ================ \IfFileExists{TeX/References.bib}{ \titleformat{\section}{\sffamily\bfseries}{}{0pt}{}[{\color{aqua}\hrule}] \printbibliography }{} \end{document}
%% Einleitung.tex
%% $Id: einleitung.tex 61 2012-05-03 13:58:03Z bless $
%%
\chapter{Introduction}
\label{ch:Introduction}
%% ==============================
%% ==============================
\section{Motivation}
%% ==============================
\label{ch:Introduction:sec:Motivation}
\par{
In the last decades, digital data transfer has become available everywhere and to everyone. This rise of digital data drives the need for data compression techniques and for improvements on existing ones. Run-length encoding \cite{rle-patent} (abbreviated as RLE) is a simple coding scheme that performs lossless data compression. RLE represents consecutive, identical symbols of a string as a run, usually denoted by $\sigma^i$, where $\sigma$ is an alphabet symbol and $i$ is its number of repetitions. To give an example, the string \emph{aaaabbaaabbbba} can be compressed into the RLE format $a^{4}b^{2}a^{3}b^{4}a^{1}$. Thanks to its simplicity, RLE is still used in several areas such as fax transmission, where it is combined with other techniques into Modified Huffman Coding \cite{fax-rle}, described in Section \ref{ch:Principles of compression:sec:Huffman Coding}. Since most fax documents are simple text on a white background, RLE is particularly suitable for fax and often achieves good compression ratios.
}
%% ==============================
\section{Problem statement}
%% ==============================
\label{ch:Introduction:sec:Problem statement}
\par{
Some strings, like \emph{aaaabbbb}, achieve a very good compression ratio because the string contains only two distinct characters and each of them repeats more than twice. It can therefore be compressed to $a^4b^4$, i.e.\ from 8 bytes down to 4 bytes with a suitable encoding. On the other hand, if the input consists of highly mixed characters with few or no repetitions at all, like \emph{abcdefgh}, the run-length encoding of the string is $a^1b^1c^1d^1e^1f^1g^1h^1$, which needs up to 16 bytes, depending on the implementation. The inherent problem of run-length encoding is therefore a possible explosion in size, caused by missing repetitions in the input string. Expanding a string to twice its original size is highly undesirable worst-case behavior for a compression algorithm, so one has to make sure that the input data is suited to RLE as a compression scheme. One goal is to improve the compression ratio on data currently not suited for run-length encoding and to perform better than the originally proposed RLE, so that it can be applied to arbitrary data.}
%% ==============================
\section{Main Objective}
%% ==============================
\label{ch:Introduction:sec:Main Objective}
\par{
The main objective that derives from the problem statement is to achieve an improved compression ratio, compared to regular run-length encoding, on strings or files that are currently not suited for the method. Additionally, it is desirable to further increase its performance in cases where it is already reasonable. To unify the measurements, the compression ratio is calculated by encoding all files listed in the Calgary Corpus, which will be presented in Section \ref{tab:t05 The Calgary Corpus}. Since most improvements, such as permutations of the input (for example a reversible Burrows-Wheeler transformation to increase the number of consecutive symbols) or a different way of reading the byte stream, take quite some time, encoding and decoding speed will decrease with increasing preprocessing effort.
A secondary goal is to keep encoding and decoding speed at a reasonable pace.
}
%% ==============================
\section{Structure of this Work}
%% ==============================
\label{ch:Intoduction:sec:Structure}
\par{
This work starts with an introduction to the basics of compression and to the methods of this discipline that are used later on, followed by an analysis of the current state of the art. Then the conceptual design is presented, together with an analysis of the results. Afterwards, the implementation of the algorithms is described, and the work as a whole is evaluated in a short closing discussion.
}

%%% Local Variables:
%%% mode: latex
%%% TeX-master: "thesis"
%%% End:
\documentclass[11pt,twocolumn]{amsart} \setlength{\columnsep}{0.5cm} \usepackage{amsfonts,amsthm,amssymb,amsmath,amsopn,float} \usepackage{xifthen} \usepackage{graphicx} \usepackage{tikz,pgfplots} \usepackage{epstopdf} %\usepackage{hyperref} \usepackage[pdftex,backref=section,hypertexnames=true,plainpages=false, naturalnames,colorlinks=true,linkcolor=blue,bookmarks]{hyperref} \usepackage[margin=0.5in]{geometry} % set the margins to 1ine on all sides %\usepackage{subfigure} %\usepackage{subcaption} %\usepackage[foot]{amsaddr} % to add address of the authors in footnote size at the bottom % for more insight into compilation error \errorcontextlines 90000 %% to check reference need to uncomment and after checking need to comment this back. %\usepackage{refcheck} \usepackage{algorithm} %\usepackage{algpseudocode} \usepackage{algcompatible} \usepackage{color} \definecolor{mygray}{RGB}{47,79,79} \usepackage{amsaddr} \makeatletter \renewcommand{\email}[2][]{% \ifx\emails\@empty\relax\else{\g@addto@macro\emails{,\space}}\fi% \@ifnotempty{#1}{\g@addto@macro\emails{\textrm{(#1)}\space}}% \g@addto@macro\emails{#2}% } \makeatother \usepackage{placeins} \usepackage{appendix} % for adding appendix \theoremstyle{definition} \newtheorem{theorem}{Theorem}[section] \newtheorem{lemma}[theorem]{Lemma} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{noname}[theorem]{} \newtheorem{sublemma}{}[theorem] \newtheorem{conjecture}[theorem]{Conjecture} \newtheorem{summary}[theorem]{Summary} \theoremstyle{definition} \newtheorem{definition}[theorem]{Definition} \newtheorem{example}[theorem]{Example} \newtheorem{remark}[theorem]{Remark} \numberwithin{equation}{section} \newcommand{\ba}{\backslash} \graphicspath{ {images/} } \numberwithin{equation}{section} \newcommand{\bolds}[1]{\boldsymbol{#1}} \newcommand{\sref}[2]{\hyperref[#2]{#1 \ref*{#2}}} \newcommand{\R}{\mathbf{R}} % The real numbers. \newcommand{\Z}{\mathbf{Z}} % The integer numbers. \newcommand{\calL}{\mathcal{L}} % The integer numbers. \newcommand{\bcolon}{\boldsymbol{:}} \newcommand{\dyad}{\mathbf{\otimes}} % Algorithmic modifications %\makeatletter %\newcommand{\HPXLOOP}[1]{\ALC@it\algorithmicloop\ #1% % \begin{ALC@hpx}} %\newcommand{\ENDHPXLOOP}{\end{ALC@hpx}\ALC@it\algorithmicendloop} %\makeatother \algblockdefx[Hloop]{Hloop}{EndHloop}[1][]{\textbf{hpx::parallel::for$\_$loop} #1}{\textbf{end parallel for}} \graphicspath{{imgs/}} %-------------------------------------------------------------------------% % document starts from here %-------------------------------------------------------------------------% \begin{document} \title{Mesh partitioning for HPX parallel computing for nonlocal computational models} \author{Prashant K. 
Jha$^\dagger$}
\address[$\dagger$]{Oden Institute for Computational Engineering and Sciences, \\ The University of Texas at Austin, \\ Austin, TX 78712, USA}
\email[$\dagger$]{[email protected]}
\author{Patrick Diehl$^\ddagger$}
\address[$\ddagger$]{Center for Computation and Technology, \\ Louisiana State University, \\ Baton Rouge, LA 70803 USA}
\email[$\ddagger$]{[email protected]}
\maketitle

In this document, we consider a general class of nonlocal computational models encountered in various fields, such as peridynamics \cite{CMPer-Silling5,BobaruHu,HaBobaru,CMPer-Agwai,CMPer-Ghajari,Diehl,CMPer-Lipton2,CMPer-JhaLipton2,CMPer-JhaLipton5}, the discrete element method \cite{NM-Desai,NM-Desai2,NM-Gladkyy,NM-Lobo,NM-Dosta}, peridynamics combined with the discrete element method for granular media \cite{NM-Zhu, NM-Masoud}, nonlocal cell-cell adhesion in computational biology \cite{armstrong2006continuum,engwer2017structured,stinner2014global}, and the nonlocal heat equation \cite{burch2011classical,du2012analysis}. The references above are only a minuscule fraction of what is available. The application of nonlocal modeling methods to the understanding of complex, spatially multiscale phenomena is seen in various new fields such as fluid mechanics, particulate media, directed self-assembly of block copolymers, tumor modeling, etc. What is both interesting and convenient is that the underlying algorithm used to numerically solve these nonlocal models is more or less the same; therefore, an efficient computational method developed for one type of nonlocal model can easily be applied to nonlocal models in other fields. It is our understanding that efficient computational methods for solving nonlocal models are very important. Nonlocal models are widely known to be much more computationally demanding than their local counterparts (such as the heat equation or the wave equation). This is because the length of interaction in a nonlocal model is typically 3 to 10 times larger than the size of the discretization, so the usual local assembly of matrices and vectors employed in the solution of PDEs (partial differential equations) is not feasible. Efficient parallel implementation is also difficult, because the long-range interaction creates dependences over a much larger length scale.

In this document we present this problem using the simple example of the nonlocal diffusion equation; see \cite{burch2011classical} and the references therein for more information. Our main goal is to highlight the key difficulties in designing a massively parallel scheme for nonlocal models while keeping the model-related complexity to a minimum. For this purpose the nonlocal heat equation is suitable. Further, to fix ideas, we only consider the two-dimensional setting.

In this work we use the parallel API HPX. HPX is an open source asynchronous many-task runtime system that focuses on high performance computing~\cite{Heller2017,tabbal2011preliminary,kaiser2014hpx}. HPX provides wait-free asynchronous execution and futurization for synchronization. It also features parallel execution policies utilizing a task scheduler, which enables fine-grained load balancing and synchronization through work stealing.
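For example, a data-parallel loop can be expressed directly with such a policy. The following fragment is only a sketch: the exact header and namespace of \texttt{for\_loop} depend on the HPX version, and the HPX runtime is assumed to be initialized (e.g.\ via \texttt{hpx\_main}).
\begin{verbatim}
#include <hpx/include/parallel_for_loop.hpp>
#include <cstddef>
#include <vector>

// Sketch: each index becomes a lightweight task;
// work stealing balances the load across cores.
void scale(std::vector<double>& u, double factor)
{
  hpx::parallel::for_loop(
      hpx::parallel::execution::par,
      std::size_t(0), u.size(),
      [&](std::size_t i) { u[i] *= factor; });
}
\end{verbatim}
The same construct appears as \textbf{hpx::parallel::for\_loop} in the algorithms below.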
HPX is in strict adherence to the C++ $11$~\cite{cxx11_standard} and C++ $17$ standard definitions~\cite{cxx17_standard}.\\

\section{Nonlocal diffusion equation}
In this section we give a brief overview of the nonlocal diffusion equation for a temperature field $u$ over a square domain $D = [0,1]^2$, subjected to a zero temperature condition on its boundary and to a given heat source distribution within the domain. It is a first order transient equation in time. For simplicity, we only consider an explicit time integration scheme, namely the forward Euler scheme. For the spatial discretization, we consider a uniform mesh over the domain $D$ and a finite difference approximation (also commonly referred to as a particle discretization). The resulting equations are very simple, and a serial implementation requires only a few lines of code, see \autoref{alg:serial}. With HPX, a thread-level, shared-memory parallel implementation is almost as simple as the serial implementation, see \autoref{alg:semi parallel}. Our goal is to fully parallelize the solver, i.e.\ everything from the mesh partition to the computation of fields at the mesh nodes. We proceed stepwise, from the serial implementation, to the shared-memory parallel implementation, and finally to the fully parallel implementation, and discuss the challenges in going from one step to the next.

Let the material domain be $D = [0,1]^2$ and the time domain $I = [0,T]$. We fix the size of the horizon (the length over which one point interacts with another, also called the nonlocal length scale) $\epsilon>0$. Let $D_c = (-\epsilon, 1+\epsilon)^2 - D$ be the nonlocal boundary of thickness $\epsilon$ surrounding $D$, see \autoref{fig:domain}. We define $H_r(x)$ as the two-dimensional open ball of radius $r$ centered at $x\in \R^2$.
\begin{figure}
\centering
\includegraphics[scale=0.5]{material_domain.png}
\caption{Material domain $D$ and nonlocal boundary $D_c$. The figure shows a typical material point $x\in D$ and the ball $H_\epsilon(x)$. }\label{fig:domain}
\end{figure}
Let $u:I \times D\cup D_c \to \R$ be the temperature field. Let $J:\R \to \R$ be a positive function such that $0\leq J(r) \leq M$ for $r\in [0,1]$ and $J(r) = 0$ for $r\notin [0,1]$. We refer to $J$ as the influence function. $u(t,x)$ satisfies the following nonlocal diffusion equation, $\forall x\in D \text{ and } \forall t \in I$,
\begin{align}\label{eq:diff eqn}
\partial_t u(t,x) &= b(t,x) \notag \\
&\,+ c \int_{H_\epsilon(x)} J(\frac{|y-x|}{\epsilon}) (u(t,y) - u(t,x)) dy,
\end{align}
where $b(t,x)$ is the external source at time $t$ and point $x$ (a known function of space and time). The initial condition is given by
\begin{align}\label{eq:ic}
u(0,x) = u_0(x) \qquad \forall x\in D
\end{align}
and the boundary condition is given by
\begin{align} \label{eq:bc}
u(t,x) = 0 \qquad \forall x\in D_c \text{ and } \forall t\in I.
\end{align}
The constant $c$ is chosen such that in the limit $\epsilon\to 0$ the nonlocal operator on the right-hand side of \autoref{eq:diff eqn} goes to $k\nabla \cdot \nabla u$. The local diffusion equation is given by
\begin{align}\label{eq:locdiff}
\partial_t u(t,x) &= k \nabla \cdot \nabla u(t,x),
\end{align}
where $k$ is the diffusivity constant. The boundary condition is $u = 0$ on $\partial D$ (consistent with the boundary condition in \autoref{eq:bc}) and the initial condition is $u(0,x) = u_0(x)$.
Constant $c$ is taken as \begin{align}\label{eq:constc} c &:= \begin{cases} \frac{k}{\epsilon^3 M_2}, \qquad \text{when dimension }d=1 \\ \frac{2k}{\pi\epsilon^4 M_3}, \qquad \text{when dimension }d=2, \end{cases} \end{align} where \begin{align}\label{eq:momentJ} M_i = \int_0^1 J(r) r^i dr. \end{align} For standard influence functions, $M_i$ can be computed easily. For example, when $J = 1$, $M_i = 1/(i+1)$, when $J = 1- r$, $M_2 = 1/12, M_3 = 1/20$, etc. With choice of constant $c$ in \autoref{eq:constc}, it can be shown formally (see \autoref{s:converge}) that \begin{align}\label{eq:limit} c \int_{H_\epsilon(x)} J(\frac{|y-x|}{\epsilon}) (u(t,y) - u(t,x)) dy \to k \nabla \cdot \nabla u(t,x). \end{align} % Insert the algorithm \begin{algorithm}[ht] \caption{Serial implementation} \label{alg:serial} \begin{algorithmic}[1] \STATE \textcolor{mygray}{\it $\%$ Create neighbor list} \FOR {each integer $i \in K$} \IF {$|x_i - x_j| \leq \epsilon$} \STATE Add $j$ to neighborList$[i]$ \ENDIF \ENDFOR \STATE \STATE \textcolor{mygray}{\it $\%$ $U$ is the vector of temperatures of} \STATE \textcolor{mygray}{\it $\%$ all mesh nodes.} \STATE \STATE \textcolor{mygray}{\it $\%$ integrate in time} \FOR {each integer $0\leq k \leq T/\Delta t$} \STATE \textcolor{mygray}{\it $\%$ time step $k$} \FOR {each integer $i \in K$} \STATE \textcolor{mygray}{\it $\%$ Loop over neighbors of $i$} \STATE $val\_i = 0$ \FOR {each integer $j\in$ neighborList$[i]$} \STATE $val\_i = val\_i + $ \STATE $\quad c J(|x_j - x_i|/\epsilon) (U[j] - U[i])V_j$ \ENDFOR \STATE \textcolor{mygray}{\it $\%$ get external source} \STATE $b\_i = b(t^k, x_i)$ \STATE \textcolor{mygray}{\it $\%$ Update temperature} \STATE $U[i] = U[i] + \Delta t \times val\_i + \Delta t \times b\_i$ \ENDFOR \STATE \textcolor{mygray}{\it $\%$ Output temperature at time $t^k = k\Delta t$} \STATE Output($U$) \ENDFOR \end{algorithmic} \end{algorithm} \section{Finite difference approximation}\label{s:fd} Consider uniform mesh $D_h = (D\cup D_c)\cap (h\Z)^2$ of mesh size $h >0$, see \autoref{fig:mesh fd}. $\Z$ is the set of positive and negative integers. Let $\Delta t$ is size of time step and $[0,T]\cap (\Delta t \Z)$ be the discretization of time domain. We assume $\epsilon = m h$ where $m\geq 1$ is some integer. We consider index set $K \subset \Z^2$ such that for $i\in K$, $x_i = h i \in D$. Similarly, consider index set $K_c \subset \Z^2$ such that for $i\in K_c$, $x_i = h i \in D_c$. Let $\hat{u}^k_i$ be the solution of forward Euler discretization in time. It satisfies,$\forall 1\leq k \leq T/\Delta t, \forall i \in K$, \begin{align}\label{eq:forward fd} \dfrac{\hat{u}^{k+1}_i - \hat{u}^k_i}{\Delta t} &= b(t^k, x_i) \notag \\ &+ c \sum_{\substack{j \in K\cup K_c,\\ |x_j-x_i| \leq \epsilon}} J(|x_j - x_i|/\epsilon) (\hat{u}^k_j - \hat{u}^k_i) V_j \end{align} and \begin{align} \hat{u}^k_i = 0 \qquad \forall 0\leq k \leq T/\Delta t, \forall i \in K_c. \end{align} At $k=0$, we have $\hat{u}^0_i = u_0(x_i)$ for all $i\in K$. $V_j$ is the volume occupied by node $j$ and in our case we have $V_j = h^2$. \begin{figure}[ht] \centering \includegraphics[scale=0.6]{mesh_uniform.png} \caption{Uniform mesh of size $h$. Shaded area corresponds to nonlocal boundary $D_c$. We specify zero temperature on all the mesh nodes in the shaded area. 
For mesh node $x_i$ in $D$, we consider interaction of $x_i$ with all the mesh nodes inside the green shaded ball.}\label{fig:mesh fd} \end{figure} \subsection{Serial and semi-parallel implementation of forward euler scheme} Algorithm for serial implementation is given in \sref{Algorithm}{alg:serial}. For parallel implementation, we will use parallel for loop (HPX utility). See \sref{Algorithm}{alg:semi parallel}. In \sref{Algorithm}{alg:semi parallel}, we observe that temperature data of all mesh nodes is stored in single data variable, ``$U$", and similarly, neighbor list of all the mesh nodes are stored in single data variable, ``neighborList". We refer to this as semi-parallel as we have only parallelized for-loop and not the data. In fully parallel implementation, data will also be divided in number of computational nodes and each computational node will own data corresponding to it. This requires mesh partition. We discuss this in next section. % Insert the algorithm \begin{algorithm}[ht] \caption{Semi-parallel implementation} \label{alg:semi parallel} \begin{algorithmic}[1] \STATE \textcolor{mygray}{\it $\%$ Create neighbor list using } \STATE \textcolor{mygray}{\it $\%$ hpx parallel for loop} \Hloop {each integer $i \in K$} \textbf{do} \IF {$|x_i - x_j| \leq \epsilon$} \STATE Add $j$ to neighborList$[i]$ \ENDIF \EndHloop \STATE \STATE \textcolor{mygray}{\it $\%$ is the vector of temperatures of} \STATE \textcolor{mygray}{\it $\%$ all mesh nodes. } \STATE \STATE \textcolor{mygray}{\it $\%$ integrate in time} \FOR {each integer $1\leq k \leq T/\Delta t$} \STATE \textcolor{mygray}{\it $\%$ time step $k$} \STATE \textcolor{mygray}{\it $\%$ process mesh nodes in parallel} \Hloop {each integer $i \in K$} \STATE \textcolor{mygray}{\it $\%$ Loop over neighbors of $i$} \STATE $val\_i = 0$ \FOR {each integer $j\in$ neighborList$[i]$} \STATE $val\_i = val\_i + $ \STATE $\quad + c J(|x_j - x_i|/\epsilon) (U[j] - U[i])V_j$ \ENDFOR \STATE \textcolor{mygray}{\it $\%$ get external source} \STATE $b\_i = b(t^k, x_i)$ \STATE \textcolor{mygray}{\it $\%$ Update temperature} \STATE $U[i] = U[i] + \Delta t \times val\_i + \Delta t \times b\_i$ \EndHloop \STATE \textcolor{mygray}{\it $\%$ Output temperature at time $t^k = k\Delta t$} \STATE Output($U$) \ENDFOR \end{algorithmic} \end{algorithm} \section{Parallelization and mesh partition} Given $N$ number of computational nodes, mesh will be partitioned in $N$ parts such that each computational node will store the information corresponding to the mesh partition it owns. For example, consider 4 computers connected by network. We would like to run the problem on all 4 computers in parallel. We will partition the mesh in four parts, see \autoref{fig:mesh partition}. \begin{figure} \centering \includegraphics[scale=0.6]{mesh_partition.png} \caption{Typical mesh partition. Mesh nodes under each color are owned by the respective computer.}\label{fig:mesh partition} \end{figure} In \autoref{fig:mesh partition}, the mesh is colored to show the partition. Consider part of mesh colored as green. Let us suppose the id of computer who owns the green, blue, yellow, and red are 1, 2, 3, and 4 respectively. We now focus on one computational node, say green, and present two possible situations. \textbf{Case 1: }Consider mesh node $x_i$ which belongs to computer 1's partition. Let us suppose that $x_i$ is within the dashed line, see \autoref{fig:green case 1}. For such $x_i$, all mesh nodes $x_j \in H_\epsilon(x_i)$ will be within the green partition. 
Since computer 1 owns the mesh data of green partition, calculation of right hand side of \autoref{eq:forward fd}, i.e. $c J(|x_j - x_i|/\epsilon)(\hat{u}^k_j - \hat{u}^k_i)V_j$, can be carried out without needing to interact with other computers. \begin{figure}[ht] \centering \includegraphics[scale=0.5]{mesh_partition_green_case_1.png} \caption{Mesh nodes of green partition which are within dashed line do not interact with partition owned by other computers.}\label{fig:green case 1} \end{figure} \textbf{Case 2: }Now consider another mesh node $x_i$ in green partition. This mesh node is close to the boundary of green partition, and we see that there are mesh nodes $x_j$ which are in ball $H_\epsilon(x_i)$, but which are not part of green partition. For example, we consider $x_j \in H_\epsilon(x_i)$ in \autoref{fig:green case 2}. Mesh node $x_j$ is in Blue partition. Blue partition is owned by computer 2. Therefore, to compute the right hand side term in \autoref{eq:forward fd}, computer 1 has to request the information associated to mesh node $x_j$ from computer 2. We see that for all the mesh nodes in Green partition which are outside dash line, computer 1 will have to communicate with other computers for the information. \begin{figure}[ht] \centering \includegraphics[scale=0.5]{mesh_partition_green_case_2.png} \caption{In first example, mesh node $x_i$ of Green partition interacts with few mesh nodes of Blue partition owned by computer 2. In second example, we see that $x_i$ of Green mesh interacts with mesh nodes owned by computer 2, 3, and 4.}\label{fig:green case 2} \end{figure} \subsection{Algorithm for fully parallel code} Consider \sref{Algorithm}{alg:parallel} which outlines the steps needed to run the problem in fully parallel framework. Following are the list of steps which will require effort in implementing \sref{Algorithm}{alg:parallel}. \noindent \textbf{1. Partitioning of mesh: }Libraries are available which can partition the mesh into given number of computational nodes. \noindent \textbf{2. List of interacting mesh nodes owned by other computational node: }If global mesh data is available to each computational node then we can create a list of interacting neighboring mesh nodes which are owned by other computational node. This list will have global id, which is unique, of mesh nodes. \noindent \textbf{3. Sharing of information: }For given computational node, we need to implement the method which shares information to other computational node and which receives the information from other computational node. For this we need a list of information to be shared and communication method available in HPX framework. 
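As an illustration of step 3, the per-time-step exchange could be organized as in the following sketch. Only \texttt{hpx::async} and futures are assumed from HPX; the helpers \texttt{request\_data} and \texttt{share\_data} are hypothetical placeholders (stubbed below) for whatever communication primitive is eventually chosen, and the exact HPX headers depend on the version.
\begin{verbatim}
#include <hpx/include/async.hpp>
#include <hpx/include/lcos.hpp>
#include <cstddef>
#include <map>
#include <vector>

// Hypothetical payload: ghost-node temperatures,
// indexed by global mesh-node id.
using GhostData = std::map<std::size_t, double>;

// Hypothetical communication wrappers (stubs).
GhostData request_data(std::size_t, std::size_t)
{ return {}; }
void share_data(std::size_t, std::size_t) {}

// One exchange: ask every other computational
// node for the ghost data this node needs and
// send our own boundary data to it.
std::map<std::size_t, GhostData>
exchange_ghosts(std::size_t my_id, std::size_t N)
{
  std::vector<hpx::future<GhostData>> requests;
  std::vector<std::size_t> from;
  for (std::size_t i = 0; i < N; ++i) {
    if (i == my_id) continue;
    requests.push_back(hpx::async(
        [=] { return request_data(my_id, i); }));
    from.push_back(i);
    share_data(my_id, i);
  }
  std::map<std::size_t, GhostData> getData;
  for (std::size_t k = 0; k < requests.size(); ++k)
    getData[from[k]] = requests[k].get();
  return getData;
}
\end{verbatim}
The returned map plays the role of \texttt{getData} in \sref{Algorithm}{alg:parallel}, and the requests can later be overlapped with the update of the interior nodes.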
% Insert the algorithm \begin{algorithm}[ht] \caption{Fully parallel implementation} \label{alg:parallel} \begin{algorithmic}[1] \STATE \textcolor{mygray}{\it $\%$ mesh$\_$file is the file containing mesh data} \STATE \textcolor{mygray}{\it $\%$ N is the number of computational nodes} \STATE \STATE \textcolor{mygray}{\it $\%$ get the id of this computational node } \STATE my$\_$id = get$\_$id() \STATE \STATE \textcolor{mygray}{\it $\%$ read mesh data and create mesh partition} \STATE myMeshNodes = create$\_$mesh$\_$partition(mesh$\_$file) \STATE \STATE \textcolor{mygray}{\it $\%$ create neighbor list.} \STATE neighborList = create$\_$neighbor$\_$list(mesh$\_$file) \STATE \STATE \textcolor{mygray}{\it $\%$ next task is to create a list of mesh} \STATE \textcolor{mygray}{\it $\%$ nodes, associated to other computational} \STATE \textcolor{mygray}{\it $\%$ nodes, which interact with mesh nodes} \STATE \textcolor{mygray}{\it $\%$ owned by this computational node} \FOR {each integer $0\leq i \leq N$} \IF {$i == $ my$\_id$} \STATE \textcolor{mygray}{\it $\%$ skip this i} \ELSE \STATE \textcolor{mygray}{\it $\%$ create a list of interacting nodes} \STATE \textcolor{mygray}{\it $\%$ owned by comp. node $i$} \STATE neighborsOutside[i] \STATE \; = create$\_$list$\_$interacting$\_$nodes(my$\_$id, i) \ENDIF \ENDFOR \STATE \STATE \textcolor{mygray}{\it $\%$ $U$ is the vector of temperatures of} \STATE \textcolor{mygray}{\it $\%$ mesh nodes owned by this comp. node} \STATE \STATE \textcolor{mygray}{\it $\%$ integrate in time} \FOR {each integer $0\leq k \leq T/\Delta t$} \STATE \textcolor{mygray}{\it $\%$ time step $k$} \STATE \STATE \textcolor{mygray}{\it $\%$ get data from other computer } \STATE \textcolor{mygray}{\it $\%$ before integrating in time} \FOR {each integer $0\leq i \leq N$} \IF {$i == $ my$\_id$} \STATE \textcolor{mygray}{\it $\%$ skip this i} \ELSE \STATE \textcolor{mygray}{\it $\%$ request data from comp. node i} \STATE getData[i] = request$\_$data(my$\_$id, i) \STATE \STATE \textcolor{mygray}{\it $\%$ share data to comp. node i} \STATE \textcolor{mygray}{\it $\%$ as comp. node i may also have} \STATE \textcolor{mygray}{\it $\%$ mesh nodes which have neighbors } \STATE \textcolor{mygray}{\it $\%$ in this comp. node} \STATE share$\_$data(my$\_$id, i) \ENDIF \ENDFOR \algstore{myalg} \end{algorithmic} \end{algorithm} \begin{algorithm} \begin{algorithmic}[1] \algrestore{myalg} \STATE \textcolor{mygray}{\it $\%$ we now have all the relevant data so} \STATE \textcolor{mygray}{\it $\%$ we can solve for temperature} \STATE \textcolor{mygray}{\it $\%$ at next time step} \Hloop {$i \in $ myMeshNodes} \textbf{do} \STATE \textcolor{mygray}{\it $\%$ Loop over neighbors of $i$} \STATE $val\_i = 0$ \FOR {each integer $j\in$ neighborList$[i]$} \STATE \textcolor{mygray}{\it $\%$ check if $j$ is in myMeshNode} \IF {$j \in $ myMeshNode} \STATE \textcolor{mygray}{\it $\%$ this mesh node is within dashed line} \STATE \STATE \textcolor{mygray}{\it $\%$ compute contribution} \STATE $val\_i = val\_i $ \STATE $\quad + c J(|x_j - x_i|/\epsilon)$ \STATE $\quad (U[j] - U[i])V_j$ \ELSE \STATE \textcolor{mygray}{\it $\%$ this mesh node is outside dashed line} \STATE \textcolor{mygray}{\it $\%$ search and find which comp. 
node } \STATE \textcolor{mygray}{\it $\%$ owns mesh node $j$} \STATE get$\_$comp$\_$id = \STATE \; search$\_$for$\_$mesh$\_$node(my$\_$id, j) \STATE \STATE \textcolor{mygray}{\it $\%$ Look for j in getData[get$\_$comp$\_$id]} \STATE \textcolor{mygray}{\it $\%$ and get the temperature of mesh node j} \STATE t$\_$j = find$\_$in(j, getData[get$\_$comp$\_$id]) \STATE \STATE \textcolor{mygray}{\it $\%$ compute contribution} \STATE $val\_i = val\_i $ \STATE $\quad + c J(|x_j - x_i|/\epsilon)$ \STATE $\quad (t\_j - U[i])V_j$ \ENDIF \ENDFOR \STATE \textcolor{mygray}{\it $\%$ get external source} \STATE $b\_i = b(t^k, x_i)$ \STATE \textcolor{mygray}{\it $\%$ Update temperature} \STATE $U[i] = U[i] + \Delta t \times val\_i + \Delta t \times b\_i$ \EndHloop \STATE \textcolor{mygray}{\it $\%$ Output temperature at time $t^k = k\Delta t$} \STATE Output($U$) \ENDFOR \end{algorithmic} \end{algorithm} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% References %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \FloatBarrier %\bibliographystyle{plain} %\bibliography{main} \newcommand{\noopsort}[1]{} \begin{thebibliography}{10} \bibitem{CMPer-Agwai} Abigail Agwai, Ibrahim Guven, and Erdogan Madenci. \newblock Predicting crack propagation with peridynamics: a comparative study. \newblock {\em International journal of fracture}, 171(1):65--78, 2011. \bibitem{armstrong2006continuum} Nicola~J Armstrong, Kevin~J Painter, and Jonathan~A Sherratt. \newblock A continuum approach to modelling cell--cell adhesion. \newblock {\em Journal of theoretical biology}, 243(1):98--113, 2006. \bibitem{NM-Masoud} Masoud Behzadinasab, Tracy~J Vogler, Amanda~M Peterson, Rezwanur Rahman, and John~T Foster. \newblock Peridynamics modeling of a shock wave perturbation decay experiment in granular materials with intra-granular fracture. \newblock {\em Journal of Dynamic Behavior of Materials}, 4(4):529--542, 2018. \bibitem{BobaruHu} Florin Bobaru and Wenke Hu. \newblock The meaning, selection, and use of the peridynamic horizon and its relation to crack branching in brittle materials. \newblock {\em International journal of fracture}, 176(2):215--222, 2012. \bibitem{burch2011classical} Nathanial Burch and Richard Lehoucq. \newblock Classical, nonlocal, and fractional diffusion equations on bounded domains. \newblock {\em International Journal for Multiscale Computational Engineering}, 9(6), 2011. \bibitem{NM-Desai} Prathamesh Desai. \newblock Tribosurface interactions involving particulate media with dem-calibrated properties: Experiments and modeling. \newblock 2017. \bibitem{NM-Desai2} Prathamesh~S Desai, Akash Mehta, Patrick~SM Dougherty, and Fred~C Higgs. \newblock A rheometry based calibration of a first-order dem model to generate virtual avatars of metal additive manufacturing (am) powders. \newblock {\em Powder technology}, 342:441--456, 2019. \bibitem{Diehl} P~Diehl, R~Lipton, and MA~Schweitzer. \newblock Numerical verification of a bond-based softening peridynamic model for small displacements: Deducing material parameters from classical linear theory. \newblock 2016. \bibitem{NM-Dosta} Maksym Dosta, Steven Dale, Sergiy Antonyuk, Carl Wassgren, Stefan Heinrich, and James~D Litster. \newblock Numerical and experimental analysis of influence of granule microstructure on its compression breakage. \newblock {\em Powder technology}, 299:87--97, 2016. \bibitem{du2012analysis} Qiang Du, Max Gunzburger, Richard~B Lehoucq, and Kun Zhou. 
\newblock Analysis and approximation of nonlocal diffusion problems with volume constraints. \newblock {\em SIAM review}, 54(4):667--696, 2012. \bibitem{engwer2017structured} Christian Engwer, Christian Stinner, and Christina Surulescu. \newblock On a structured multiscale model for acid-mediated tumor invasion: the effects of adhesion and proliferation. \newblock {\em Mathematical Models and Methods in Applied Sciences}, 27(07):1355--1390, 2017. \bibitem{CMPer-Ghajari} M~Ghajari, L~Iannucci, and P~Curtis. \newblock A peridynamic material model for the analysis of dynamic crack propagation in orthotropic media. \newblock {\em Computer Methods in Applied Mechanics and Engineering}, 276:431--452, 2014. \bibitem{NM-Gladkyy} Anton Gladkyy and Meinhard Kuna. \newblock Dem simulation of polyhedral particle cracking using a combined mohr--coulomb--weibull failure criterion. \newblock {\em Granular Matter}, 19(3):41, 2017. \bibitem{HaBobaru} Youn~Doh Ha and Florin Bobaru. \newblock Studies of dynamic crack propagation and crack branching with peridynamics. \newblock {\em International Journal of Fracture}, 162(1-2):229--244, 2010. \bibitem{Heller2017} Thomas Heller, Patrick Diehl, Zachary Byerly, John Biddiscombe, and Hartmut Kaiser. \newblock {HPX -- An open source C++ Standard Library for Parallelism and Concurrency}. \newblock In {\em {Proceedings of OpenSuCo 2017, Denver , Colorado USA, November 2017 (OpenSuCo’17)}}, page~5, 2017. \bibitem{CMPer-JhaLipton2} Prashant~K. Jha and Robert Lipton. \newblock Numerical convergence of nonlinear nonlocal continuum models to local elastodynamics. \newblock {\em International Journal for Numerical Methods in Engineering}, 114(13):1389--1410, 2018. \bibitem{CMPer-JhaLipton5} Prashant~K Jha and Robert Lipton. \newblock Numerical convergence of finite difference approximations for state based peridynamic fracture models. \newblock {\em Computer Methods in Applied Mechanics and Engineering}, 351:184--225, July 2019. \bibitem{kaiser2014hpx} Hartmut Kaiser, Thomas Heller, Bryce Adelstein-Lelbach, Adrian Serio, and Dietmar Fey. \newblock Hpx: A task based programming model in a global address space. \newblock In {\em Proceedings of the 8th International Conference on Partitioned Global Address Space Programming Models}, page~6. ACM, 2014. \bibitem{CMPer-Lipton2} Robert Lipton, Stewart Silling, and Richard Lehoucq. \newblock Complex fracture nucleation and evolution with nonlocal elastodynamics. \newblock {\em arXiv preprint arXiv:1602.00247}, 2016. \bibitem{NM-Lobo} Sebastian Lobo-Guerrero and Luis~E Vallejo. \newblock Discrete element method evaluation of granular crushing under direct shear test conditions. \newblock {\em Journal of Geotechnical and Geoenvironmental Engineering}, 131(10):1295--1300, 2005. \bibitem{CMPer-Silling5} SA~Silling, O~Weckner, E~Askari, and Florin Bobaru. \newblock Crack nucleation in a peridynamic solid. \newblock {\em International Journal of Fracture}, 162(1-2):219--227, 2010. \bibitem{stinner2014global} Christian Stinner, Christina Surulescu, and Michael Winkler. \newblock Global weak solutions in a pde-ode system modeling multiscale cancer cell invasion. \newblock {\em SIAM Journal on Mathematical Analysis}, 46(3):1969--2007, 2014. \bibitem{tabbal2011preliminary} Alexandre Tabbal, Matthew Anderson, Maciej Brodowicz, Hartmut Kaiser, and Thomas Sterling. \newblock Preliminary design examination of the parallex system from a software and hardware perspective. 
\newblock {\em ACM SIGMETRICS Performance Evaluation Review}, 38(4):81--87, 2011. \bibitem{cxx11_standard} {The C++ Standards Committee}. \newblock {ISO International Standard ISO/IEC 14882:2011, Programming Language C++}. \newblock Technical report, {Geneva, Switzerland: International Organization for Standardization (ISO).}, 2011. \newblock \url{http://www.open-std.org/jtc1/sc22/wg21}. \bibitem{cxx17_standard} {The C++ Standards Committee}. \newblock {ISO International Standard ISO/IEC 14882:2017, Programming Language C++}. \newblock Technical report, {Geneva, Switzerland: International Organization for Standardization (ISO).}, 2017. \newblock \url{http://www.open-std.org/jtc1/sc22/wg21}. \bibitem{NM-Zhu} F~Zhu and J~Zhao. \newblock A peridynamic investigation on crushing of sand particles. \newblock {\em G{\'e}otechnique}, 69(6):526--540, 2018. \end{thebibliography} \newpage %%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{appendices} \section{Constructing exact solution} The idea is to use source term $b$ to construct the analytical solution. This is shown next. \subsection{One dimension} Let $$w(t,x) = \cos(2\pi t) \sin(2\pi x)$$ when $x\in [0,1]$ and $w(t,x) = 0$ when $x\notin [0,1]$. We want solution of \autoref{eq:diff eqn} $u$ to be equal to $w$, i.e., $u = w$. To achieve this we substitute \begin{align}\label{eq:source exact 1d} b(t,x) &= \partial_t w(t,x) \notag \\ &\,- c \int_{H_\epsilon(x)} J(|y-x|/\epsilon) (w(t,y) - w(t,x)) dy \end{align} in \autoref{eq:diff eqn} and prescribe $u(0,x) = w(0,x) = \sin(2\pi x)$ as initial condition on $u$ and $u(t,x) = 0$ on $x\in D_c$ as boundary condition. Note that $$ \partial_t w(t,x) = -2\pi \sin(2\pi t) \sin(2\pi x) $$ when $x\in [0,1]$ and $\partial_t w(t,x) = 0$ when $x\notin [0,1]$. \subsection{Two dimension} Let $$w(t,x) = \cos(2\pi t) \sin(2\pi x_1) \sin(2\pi x_2)$$ when $x\in [0,1]^2$ and $w(t,x) = 0$ when $x\notin [0,1]^2$. We substitute \begin{align}\label{eq:source exact 2d} b(t,x) &= \partial_t w(t,x) \notag \\ &\, - c \int_{H_\epsilon(x)} J(|y-x|/\epsilon) (w(t,y) - w(t,x)) dy \end{align} in \autoref{eq:diff eqn} and prescribe $$u(0,x) = w(0,x) = \sin(2\pi x_1)\sin(2\pi x_2)$$ as initial condition on $u$ and $u(t,x) = 0$ on $x\in D_c$ as boundary condition. To obtain numerical solution $\hat{u}^k_i$ following discretization \autoref{eq:forward fd}, we essentially have to compute source $b$ at time $t=t^k = k\Delta t$ and point $x = x_i = ih$ using the formula \autoref{eq:source exact 1d} (in 1-d simulation) and \autoref{eq:source exact 2d} (in 2-d simulation). \subsection{Numerical discretization error} Suppose $\bar{u}(t,x)$ is the exact solution and $\hat{u}^k_i$ for $0\leq k \leq T/\Delta t$ and $i\in K$ is the numerical solution. The total error at time $t^k$ is taken as \begin{align}\label{eq:err} e^k &:= h^d \sum_{i\in K} |\bar{u}(t^k, x_i) - \hat{u}^k_i|^2, \end{align} where $d=1,2$ is the dimension. Total error can be defined as $e:= \sum_{0\leq k\leq T/\Delta t} e^k$. \section{Limit of nonlocal operator to local operator in diffusion equation}\label{s:converge} We show through formal calculation that \begin{align*} c \int_{H_\epsilon(x)} J(\frac{|y-x|}{\epsilon}) (u(y) - u(x)) dy \to k \nabla \cdot \nabla u(x). 
\end{align*}
Let
\begin{align}\label{eq:opL}
\calL(u)(x) = c \int_{H_\epsilon(x)} J(\frac{|y-x|}{\epsilon}) (u(y) - u(x)) dy,
\end{align}
where
\begin{align}\label{eq:constc 2}
c &:= \begin{cases} \frac{k}{\epsilon^3 M_2}, \qquad \text{when dimension }d=1 \\ \frac{2k}{\pi\epsilon^4 M_3}, \qquad \text{when dimension }d=2, \end{cases}
\end{align}
with $M_i = \int_0^1 J(r) r^i dr$. A Taylor series approximation gives:
\begin{align*}
u(y) &= u(x) + \nabla u(x) \cdot (y-x) + \frac{1}{2} \nabla^2 u(x)\bcolon (y-x) \dyad (y-x) \notag \\
&\,+ \frac{1}{6} \nabla^3 u(\xi) \bcolon (y-x)\dyad (y-x) \dyad (y-x),
\end{align*}
where $\xi = \xi(y,x) \in D\cup D_c$. We substitute the above in \autoref{eq:opL} to get
\begin{align*}
&\calL(u)(x) \notag \\
&= c \int_{H_\epsilon(x)} J(\frac{|y-x|}{\epsilon}) \nabla u(x) \cdot (y-x) dy\notag \\
&\, + c \int_{H_\epsilon(x)} J(\frac{|y-x|}{\epsilon}) \frac{1}{2} \nabla^2 u(x)\bcolon (y-x) \dyad (y-x) dy \notag \\
&\, + c \int_{H_\epsilon(x)} J(\frac{|y-x|}{\epsilon}) \frac{1}{6} \nabla^3 u(\xi) \bcolon (y-x)\dyad (y-x) \dyad (y-x) dy.
\end{align*}
The first term above is zero because $J(|y-x|/\epsilon)$ is even in $(y-x)$ while $(y-x)$ itself is odd. We consider the change of variable $y = x + \epsilon \eta$, where $\eta \in H_1(0)$. Note that $dy = \epsilon^d d\eta$, where $d$ is the dimension. We have
\begin{align}\label{eq:opL2}
&\calL(u)(x) \notag \\
&= c \int_{H_1(0)} J(|\eta|) \frac{1}{2} \nabla^2 u(x)\bcolon \epsilon \eta \dyad \epsilon \eta \epsilon^d d\eta \notag \\
&\, + c \int_{H_1(0)} J(|\eta|) \frac{1}{6} \nabla^3 u(\xi) \bcolon \epsilon \eta \dyad \epsilon \eta \dyad \epsilon \eta \epsilon^d d\eta \notag \\
&= \frac{c\epsilon^{d+2}}{2} \nabla^2 u(x) \bcolon \left[ \int_{H_1(0)} J(|\eta|) \eta \dyad \eta d\eta \right] \notag \\
&\, + \frac{c\epsilon^{d+3}}{6} \int_{H_1(0)} J(|\eta|) \nabla^3 u(\xi) \bcolon \eta \dyad\eta \dyad\eta d\eta .
\end{align}
Assuming $u$ is such that $|\nabla^3 u| < \infty$ at all points, the last term is of order $O(c|\nabla^3 u| \epsilon^{d+3})$. We analyze the term in square brackets next.

\paragraph{One dimension} When $d=1$, we have
\begin{align}
&\int_{H_1(0)} J(|\eta|) \eta \dyad \eta d\eta\notag \\
&=\int_{-1}^1 J(|\eta|) \eta^2 d\eta \notag \\
&= 2\int_0^1 J(r) r^2 dr = 2 M_2,
\end{align}
where we used the fact that $J(|\eta|) \eta^2$ is even and noted the definition of the moment $M_2$. Substituting this in \autoref{eq:opL2} and noting the definition of the constant $c$ in \autoref{eq:constc 2}, we conclude that $$ \calL(u)(x) = k \partial_{xx} u(x) + O(|\partial_{xxx}u| \epsilon). $$ Taking $\epsilon \to 0$ gives $\calL(u) \to k\partial_{xx} u$.

\paragraph{Two dimensions} When $d=2$, we have
\begin{align*}
&\int_{H_1(0)} J(|\eta|) \eta \dyad \eta d\eta\notag \\
&=\int_0^1 \int_0^{2\pi} J(|\eta|) \eta\dyad\eta rdrd\theta \notag \\
&= \left(\int_0^1 \int_0^{2\pi} J(|\eta|) \eta_i \eta_j rdrd\theta \right) e_i \dyad e_j.
\end{align*}
Here $e_1, e_2$ are the basis vectors in 2-d, $i,j\in \{1,2\}$, and Einstein summation is implied above. Note that in polar coordinates, $\eta_1 = r\cos(\theta), \eta_2 = r\sin(\theta)$ and $|\eta| = r$. We use the following trigonometric identities
\begin{align}\label{eq:trig}
\int_0^{2\pi} \cos^2(\theta) d\theta &= \pi, \notag \\
\int_0^{2\pi} \sin^2(\theta) d\theta &= \pi, \notag \\
\int_0^{2\pi} \cos(\theta) \sin(\theta) d\theta &= 0
\end{align}
to get $$ \int_0^{2\pi} \eta_i \eta_j d\theta = r^2 \pi \delta_{ij} $$ where $\delta_{ij} = 0$ if $i\neq j$ and $\delta_{ij} = 1$ if $i=j$.
Substituting, we get
\begin{align*}
&\int_{H_1(0)} J(|\eta|) \eta \dyad \eta d\eta\notag \\
&= \left(\int_0^1 J(r) r^3 dr \right) \pi \delta_{ij} e_i \dyad e_j \notag \\
&= \pi M_3 \delta_{ij} e_i \dyad e_j.
\end{align*}
Substituting this in \autoref{eq:opL2} and noting the definition of the constant $c$ in \autoref{eq:constc 2}, we get
\begin{align*}
&\calL(u)(x) \notag \\
&\, = \frac{c\epsilon^{4}}{2} \pi M_3 \nabla^2 u(x) \bcolon \delta_{ij} e_i \dyad e_j + O(c|\nabla^3 u| \epsilon^5)\notag \\
&\, = \frac{c\epsilon^{4}}{2} \pi M_3 \nabla \cdot \nabla u(x) + O(c|\nabla^3 u| \epsilon^5),
\end{align*}
where we used the fact that $$\nabla^2 u(x) \bcolon \delta_{ij} e_i \dyad e_j = \nabla \cdot \nabla u(x).$$ Substituting the definition of $c$ from \autoref{eq:constc 2}, we get $$ \calL(u)(x) = k \nabla \cdot \nabla u(x) + O(|\nabla^3 u| \epsilon). $$ Taking $\epsilon \to 0$ gives $\calL(u) \to k\nabla \cdot \nabla u$.

\end{appendices}
\end{document}
{ "alphanum_fraction": 0.7048104763, "avg_line_length": 52.4331476323, "ext": "tex", "hexsha": "d7aae0d4ef20d7f24f36327e88f7b479b8d83323", "lang": "TeX", "max_forks_count": 4, "max_forks_repo_forks_event_max_datetime": "2020-10-25T12:43:28.000Z", "max_forks_repo_forks_event_min_datetime": "2020-05-21T12:32:21.000Z", "max_forks_repo_head_hexsha": "a0f68f20d89143f22ab40c80a21bdbe729df33d4", "max_forks_repo_licenses": [ "BSL-1.0" ], "max_forks_repo_name": "PeriHPX/nonlocalheatequation", "max_forks_repo_path": "description/problem_description.tex", "max_issues_count": 3, "max_issues_repo_head_hexsha": "a0f68f20d89143f22ab40c80a21bdbe729df33d4", "max_issues_repo_issues_event_max_datetime": "2020-08-01T13:03:47.000Z", "max_issues_repo_issues_event_min_datetime": "2020-05-22T13:30:02.000Z", "max_issues_repo_licenses": [ "BSL-1.0" ], "max_issues_repo_name": "PeriHPX/nonlocalheatequation", "max_issues_repo_path": "description/problem_description.tex", "max_line_length": 1211, "max_stars_count": 1, "max_stars_repo_head_hexsha": "a0f68f20d89143f22ab40c80a21bdbe729df33d4", "max_stars_repo_licenses": [ "BSL-1.0" ], "max_stars_repo_name": "PeriHPX/nonlocalheatequation", "max_stars_repo_path": "description/problem_description.tex", "max_stars_repo_stars_event_max_datetime": "2021-03-29T04:20:02.000Z", "max_stars_repo_stars_event_min_datetime": "2021-03-29T04:20:02.000Z", "num_tokens": 12279, "size": 37647 }
%!TEX root = report.tex
\chapter{Experiments}
\label{chp:experiments}

In this chapter we detail the experiments performed on the model described in \fullref{chp:theorystuff} and discuss the respective results.

\section{First experiment}
\label{subsec:first}

% wrapfig documentation: https://ctan.mirror.garr.it/mirrors/ctan/macros/latex/contrib/wrapfig/wrapfig-doc.pdf
% noted here because the first optional parameter can come in handy
\begin{wrapfigure}[15]{r}{0.5\textwidth}
\centering
\vspace{-20pt}
\includegraphics[width=0.5\textwidth]{400_loss}
\caption{Initial experiment loss (400 epochs on Wiki)}
\label{fig:400_loss}
\end{wrapfigure}

An initial experiment was performed by training the C3AE model for 400 epochs on the Wiki dataset, as a means to test the \texttt{tensorflow} environment and the functioning of the implemented C3AE model within it. The evolution of the training and validation loss is shown in \autoref{fig:400_loss}. The experiment ran to completion with no runtime errors; the model was able to decrease its loss, and the loss reached an asymptote around epoch 50. For this reason, we concluded that 400 epochs of training are excessive for this model and set the epoch limit to 100 for all following experiments in order to reduce their computation time, assuming that they would evolve similarly to this one and that all significant improvement would therefore happen well before the 100\textsuperscript{th} epoch.

\section{Performance evaluation over multiple datasets}

Unless stated otherwise, all the following experiments have been performed on datasets processed with the augmentation techniques described in \fullref{subsec:augmentation}.

\newpage

\subsection{Wiki}
\label{subsec:wiki}

\begin{wrapfigure}[15]{r}{0.5\textwidth}
\centering
\vspace{-20pt}
\includegraphics[width=0.5\textwidth]{full_model_mae}
\caption{MAE on Wiki dataset (100 epochs)}
\label{fig:wiki_loss}
\end{wrapfigure}

The first and main dataset used in our experiments was Wiki. As shown in the loss graph in \autoref{fig:wiki_loss}, the learning curve behaves as expected, and the best MAE obtained in this experiment is 6.79 years. Compared to the result in the original paper, which reports an MAE of 6.44 on the same dataset, this is close to state-of-the-art performance. However, this result only takes into consideration Wiki images for both training and validation. We therefore tested the output of this experiment on the FGNET test set and found a much higher MAE of 18.1 years. The main reason for this is that the Wiki dataset is mostly composed of images of adults and the elderly and features almost no children, while FGNET has a much lower average age and even includes newborns with a declared age of 0 years. The result is that the model in this experiment always overshoots the age of the subjects, and thus we obtain a high error.

\subsection{UTK}
\label{subsec:utk}

\begin{wrapfigure}[12]{r}{0.5\textwidth}
\centering
\vspace{-20pt}
\includegraphics[width=0.5\textwidth]{utk_mae}
\caption{MAE on UTK dataset (100 epochs)}
\label{fig:utk_loss}
\end{wrapfigure}

In this experiment the starting conditions and parameters are the same as in the previous one, but the dataset this time is UTK. It contains one third as many images as Wiki, but it covers the whole range of ages, from 0 to over 100, much better.
As a matter of fact, the validation MAE (computed on the UTK set itself) is slightly higher than before at 8.67, but the test MAE on FGNET is almost halved, at 9.79 years. The conclusion is that UTK is a better dataset for our experiments, but we can still make use of Wiki, as seen in the next experiment.

\subsection{Wiki + UTK}
\label{subsec:wikiutk}

\begin{wrapfigure}[11]{r}{0.5\textwidth}
\centering
\vspace{-20pt}
\includegraphics[width=0.5\textwidth]{wiki+utk_200_mae}
\caption{MAE on Wiki+UTK (100 epochs)}
\label{fig:wiki+utk_loss}
\end{wrapfigure}

This third experiment combines the previous two. We started by pre-training a model from scratch on the Wiki dataset, and then took the output of this process and trained it further for another 100 epochs on the UTK dataset. In this way we hoped to combine the scale of the first dataset with the completeness of the second and obtain a model that outperforms the previous two. And indeed, with a final validation MAE of 8.23 and a test MAE of 8.64 years on FGNET, this has proven to be our best result yet.

\section{Ablation Study}
\label{sec:ablation_study}

A separate set of experiments was performed to study the impact on performance of the following components of the model and the training process: the context module and the cascade module of the C3AE model, and the training data augmentation. We trained the following variants of the full C3AE model:
\begin{itemize}
\item \textit{Full model}: the standard model with no changes, to serve as a benchmark against the other variants.
\item \textit{No augmentation}: the data augmentation transformations on the training data are disabled.
\item \textit{No context}: the context module of C3AE is excluded. Therefore, only one crop is given as input to the model.
\item \textit{No cascade}: the cascade module of C3AE is excluded. Consequently, this variant does not compute any KL divergence and outputs only the final age estimation.
\item \textit{No context and no cascade}: both modules are disabled.
\end{itemize}
Each variant was trained for 100 epochs on the Wiki dataset, so the results of every ablation experiment must be compared to the baseline results of the experiment described in \fullref{subsec:wiki}.

\subsection{No augmentation}
\label{subsec:no_augmentation}

In this first ablation study we removed the whole augmentation process described in \fullref{subsec:augmentation}. The validation MAE increases by 23\% compared to the Wiki full-model experiment, and the test MAE by no less than 30\%. We conclude that the augmentation phase we implemented has been highly beneficial to the performance of the full model.

\subsection{No context}
\label{subsec:no_context}

After the removal of the context module, the model retained its performance if we consider only the validation results (MAE = 6.88). However, when examining the test results on FGNET, they are rather worse than before, with the test MAE going from 18.11 to 20.73. This performance difference shows that considering image crops at three different levels of detail instead of only one, and combining their results, is useful to the performance of the full model.

\subsection{No cascade}
\label{subsec:no_cascade}

When we take out the cascade module, the only metric that contributes to the total loss is the MAE, since we lose the age distribution output and with it the KLD loss. Directly estimating an age from the input image is a considerably harder task, judging by the results of this experiment.
The validation MAE is by far the highest found so far, at 12.6 years, and the test MAE on FGNET is not great either, reaching 18.67. Once again, we can interpret this performance drop as evidence that the cascade module is also essential to obtaining a good result with the full model.

\subsection{Full ablation}
\label{subsec:full_ablation}

Unsurprisingly, the absence of both of the previous modules results in a performance comparable to the worse of the two. The validation and test MAE values are in fact aligned with the MAEs returned by the ``no cascade'' model.

\begin{figure}
\centering
\subfloat[MAE without augmentation]{
\includegraphics[width=65mm]{no_augm_mae}
\label{fig:no_augm}
}
\subfloat[MAE without context]{
\includegraphics[width=65mm]{no_context_mae}
\label{fig:no_context}
}
\hspace{0mm}
\subfloat[MAE without cascade]{
\includegraphics[width=65mm]{no_cascade_mae}
\label{fig:no_cascade}
}
\subfloat[MAE without either]{
\includegraphics[width=65mm]{full_ablation_mae}
\label{fig:full_ablation}
}
\caption{Ablation Study MAE}
\end{figure}

\begin{table}
\begin{tabular}{||c | c c c c | c c||}
\hline
Section & Model & Dataset & Augm. & Epochs & Valid. MAE & Test MAE\\ [1ex]
\hline\hline
\hyperref[subsec:first]{First} & Full & Wiki & Some & 400 & 6.82 & 22.72 \\ [1ex]
\hline
\hyperref[subsec:wiki]{Wiki} & Full & Wiki & \checked & 100 & 6.79 & 18.11 \\ [1ex]
\hline
\hyperref[subsec:utk]{UTK} & Full & UTK & \checked & 100 & 8.67 & 9.79 \\ [1ex]
\hline
\hyperref[subsec:wikiutk]{Wiki+UTK} & Full & Wiki+UTK & \checked & 100+100 & 8.23 & 8.64 \\ [1ex]
\Xhline{2\arrayrulewidth}
\hyperref[subsec:no_augmentation]{No augm.} & Full & Wiki & × & 100 & 8.36 & 24.41 \\ [1ex]
\hline
\hyperref[subsec:no_context]{No context} & No context & Wiki & \checked & 100 & 6.88 & 20.73 \\ [1ex]
\hline
\hyperref[subsec:no_cascade]{No cascade} & No cascade & Wiki & \checked & 100 & 12.60 & 18.67 \\ [1ex]
\hline
\hyperref[subsec:full_ablation]{Full ablation} & Full ablation & Wiki & \checked & 100 & 12.60 & 18.69 \\ [1ex]
\hline
\end{tabular}
\caption{Recap table of experiment parameters and results}
\label{tab:big_table}
\end{table}
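As a reminder, every MAE value reported in this chapter (and recapped in \autoref{tab:big_table}) is the mean absolute error between predicted and ground-truth ages. In our own notation (not taken verbatim from the C3AE paper), this is
\begin{equation*}
\mathrm{MAE} = \frac{1}{N}\sum_{i=1}^{N}\left|\hat{y}_i - y_i\right| ,
\end{equation*}
where $\hat{y}_i$ is the age predicted for the $i$-th image, $y_i$ is its ground-truth age, and $N$ is the number of images in the validation or test set under consideration.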
{ "alphanum_fraction": 0.7599180681, "avg_line_length": 42.3561643836, "ext": "tex", "hexsha": "11d361f41ee8422a6303aa0025df03c00e845b28", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "3207e2e29e605ea030c2f888c7eb02564867b553", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "torchipeppo/NN-project", "max_forks_repo_path": "report/4_experiments.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "3207e2e29e605ea030c2f888c7eb02564867b553", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "torchipeppo/NN-project", "max_issues_repo_path": "report/4_experiments.tex", "max_line_length": 113, "max_stars_count": 2, "max_stars_repo_head_hexsha": "3207e2e29e605ea030c2f888c7eb02564867b553", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "torchipeppo/NN-project", "max_stars_repo_path": "report/4_experiments.tex", "max_stars_repo_stars_event_max_datetime": "2021-06-04T16:56:08.000Z", "max_stars_repo_stars_event_min_datetime": "2021-05-09T14:12:13.000Z", "num_tokens": 2533, "size": 9276 }
%=============================================================================
\subsection{MoEDAL data storage in the GridPP DFC}
\label{sec:data}
%=============================================================================

%-----------------------------------------------------------------------------
\subsubsection{Accessing data on the DFC}
\label{sec:dataaccess}
%-----------------------------------------------------------------------------
If you've followed the GridPP UserGuide, you'll know how to access data stored on the Grid via the \acf{DFC}. The only thing that's different for MoEDAL work is the \ac{VO} you'll use when generating your Grid proxy. So for example, to access the \ac{DFC} via the \acf{DFCCLI}:

\begin{lstlisting}[gobble=0,numbers=none,language=bash]
$ source /cvmfs/ganga.cern.ch/dirac_ui/bashrc
$ dirac-proxy-init -g vo.moedal.org_user -M
Generating proxy...
Enter Certificate password: # Enter your grid certificate password...
.
.
[Proxy information-based output.]
.
$ dirac-dms-filecatalog-cli
Starting FileCatalog client
File Catalog Client $Revision: 1.17 $Date:
FC:/>
\end{lstlisting}

%\begin{Shaded}
%\begin{Highlighting}[]
%\NormalTok{$ }\KeywordTok{source} \NormalTok{/cvmfs/ganga.cern.ch/dirac_ui/bashrc}
%\NormalTok{$ }\KeywordTok{dirac-proxy-init} \NormalTok{-g gridpp_user -M}
%\KeywordTok{Generating} \NormalTok{proxy... }
%\KeywordTok{Enter} \NormalTok{Certificate password: }\CommentTok{# Enter your grid certificate password...}
%\KeywordTok{.}
%\KeywordTok{.} \NormalTok{[}\KeywordTok{Proxy} \NormalTok{information-based output.]}
%\KeywordTok{.}
%\end{Highlighting}
%\end{Shaded}

As one would expect, MoEDAL data can be found in MoEDAL's space in the \ac{DFC}.

\begin{lstlisting}[gobble=0,numbers=none,language=bash]
FC:/> ls /vo.moedal.org/
sim data user
\end{lstlisting}

%-----------------------------------------------------------------------------
\subsubsection{Adding data to the MoEDAL DFC}
\label{sec:addingdata}
%-----------------------------------------------------------------------------
From here it is straightforward to add data to the \ac{DFC} by following the instructions in the GridPP UserGuide. Your own data should be added to your space in the MoEDAL \texttt{user} section:

\begin{lstlisting}[gobble=0,numbers=none,language=bash]
FC:/> cd /vo.moedal.org/user/l/lise.meitner
FC:/> pwd
/vo.moedal.org/user/l/lise.meitner
FC:/> mkdir my_moedal_data
FC:/> ls
my_moedal_data
\end{lstlisting}

\begin{hintbox}{The DIRAC command line tools}
\emph{Don't forget you can use the DIRAC command line tools to automate uploads and downloads once you have created your user space in the MoEDAL \ac{DFC}.}
\end{hintbox}

\clearpage

%-----------------------------------------------------------------------------
\subsubsection{An example: the 13 TeV LHE files}
\label{sec:lhedataexample}
%-----------------------------------------------------------------------------
Experiment-wide data (i.e. data used by everyone in the Collaboration) can be, and is, also hosted by GridPP and listed in the \ac{DFC}.
For example, the \ac{LHE} files used in the 13 TeV GEANT4 simulations of the \ac{MoEDAL} experiment may be found here: \begin{lstlisting}[gobble=0,numbers=none,language=bash] FC:> cd /vo.moedal.org/sim/MMT13TeVa/LHE/13TeV/SpinHalf/Events FC:> ls run_01 run_02 run_03 run_04 run_05 run_06 run_07 run_08 run_09 run_10 FC:> cd run_01 FC:> ls events.lhe.gz run_01_tag_1_banner.txt unweighted_events.lhe.gz unweighted_events.root \end{lstlisting} These files can then be used by jobs running on the Grid with a \ac{MoEDAL} \ac{VO} proxy, as we'll see in Section~\ref{sec:moedalgridrunning}.
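To return to the hint above about the DIRAC command line tools: once your MoEDAL \ac{VO} proxy has been generated, uploads to and downloads from your own user space can also be scripted directly from the shell. The following is only a rough illustration -- the logical file name and local file name are placeholders, and the Storage Element name \texttt{MOEDAL-EXAMPLE-disk} is hypothetical, so check with the MoEDAL data management contacts for the Storage Element you should actually use.

\begin{lstlisting}[gobble=0,numbers=none,language=bash]
$ dirac-dms-add-file /vo.moedal.org/user/l/lise.meitner/my_moedal_data/my_results.root \
    my_results.root MOEDAL-EXAMPLE-disk
$ dirac-dms-get-file /vo.moedal.org/user/l/lise.meitner/my_moedal_data/my_results.root
\end{lstlisting}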
{ "alphanum_fraction": 0.6310467341, "avg_line_length": 32.963963964, "ext": "tex", "hexsha": "9d355ffe709846d6806e98a81a8656c9044053a6", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "6f47b5383b6c5384bf11a71dcf2080845065b0f6", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "CERNatschool/moedal-grid-user-guide", "max_forks_repo_path": "grid/data.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "6f47b5383b6c5384bf11a71dcf2080845065b0f6", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "CERNatschool/moedal-grid-user-guide", "max_issues_repo_path": "grid/data.tex", "max_line_length": 108, "max_stars_count": null, "max_stars_repo_head_hexsha": "6f47b5383b6c5384bf11a71dcf2080845065b0f6", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "CERNatschool/moedal-grid-user-guide", "max_stars_repo_path": "grid/data.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 980, "size": 3659 }
%--------------------------------------------------------------------- % This file provides a skeleton UCL DIS CDT report. % \pdfinclusioncopyfonts=1 % This command may be needed in order to get \ell in PDF plots to appear. Found in % https://tex.stackexchange.com/questions/322010/pdflatex-glyph-undefined-symbols-disappear-from-included-pdf %--------------------------------------------------------------------- % Specify where ATLAS LaTeX style files can be found. \newcommand*{\DISCDTLATEXPATH}{ucldiscdtlatex/latex/} % Use this variant if the files are in a central location, e.g. $HOME/texmf. %--------------------------------------------------------------------- \documentclass[NOTE, disdraft=false, UKenglish]{\DISCDTLATEXPATH UCLCDTDISdoc} % The language of the document must be set: usually UKenglish or USenglish. % british and american also work! % Commonly used options: % cdtdraft=true|false This document is an UCL CDT DIS draft. % paper=a4|letter Set paper size to A4 (default) or letter. %---------------------------------------------------------------------% More packages: \usepackage[backend=biber,style=numeric-comp,sorting=none]{biblatex} % Add you own definitions here (file tbr_final_report-defs.sty). %\usepackage{tbr_final_report-defs} \usepackage{wrapfig} \usepackage{amsmath} \usepackage{amssymb} \usepackage{amsthm} \usepackage{bm} \usepackage{graphicx} \usepackage{booktabs} \usepackage{multirow} \usepackage[nameinlink,noabbrev,capitalise]{cleveref} \usepackage{import} \usepackage{xifthen} \usepackage{pdfpages} \usepackage{transparent} \usepackage{subcaption} \usepackage{isotope} \usepackage[version=4]{mhchem} \usepackage{siunitx} \usepackage{multicol} \usepackage{inconsolata} \usepackage[toc,title]{appendix} \usepackage[section]{placeins} %--------------------------------------------------------------------- \DeclareMathOperator*{\argmax}{arg\,max} \DeclareMathOperator*{\argmin}{arg\,min} \newcommand{\incfigwidth}[2]{% \def\svgwidth{#1} \import{./fig/}{#2.pdf_tex} } \newcommand{\incfigscale}[2]{% \def\svgscale{#1} \import{./fig/}{#2.pdf_tex} } \AtBeginBibliography{\scriptsize} % Files with references for use with biblatex. % Note that biber gives an error if it finds empty bib files. \addbibresource{tbr_final_report.bib} % Paths for figures - do not forget the / at the end of the directory name. 
\graphicspath{{ucldiscdtlatex/logos/}{ucldiscdtlatex/figures/}{./fig/}} %--------------------------------------------------------------------- % Generic document information %--------------------------------------------------------------------- % Title, abstract and document \input{tbr_final_report-metadata} % Author and title for the PDF file \hypersetup{pdftitle={UCL CDT DIS Document},pdfauthor={The UCL CDT DIS}} %--------------------------------------------------------------------- % Content %--------------------------------------------------------------------- \begin{document} \maketitle \tableofcontents \clearpage %--------------------------------------------------------------------- \newpage %--------------------------------------------------------------------- \section{Introduction} \label{sec:introduction} \input{introduction} %--------------------------------------------------------------------- \section{Data Exploration} \label{sec:data} \input{data} %--------------------------------------------------------------------- \section{Methodology} \label{sec:methodology} \input{methodology} %--------------------------------------------------------------------- \section{Results} \label{sec:results} \input{results} %--------------------------------------------------------------------- \section{Conclusion} \label{sec:conclusion} \input{conclusion} %--------------------------------------------------------------------- \nocite{*} \begin{multicols}{2}[\section*{References}] \printbibliography[heading=none] \end{multicols} %--------------------------------------------------------------------- \begin{appendices} \section{Online Materials Overview} \label{app:online-overview} \input{appendix_online} \section{Detailed Results} \label{app:detailed-results} \input{appendix_results} \end{appendices} \end{document}
{ "alphanum_fraction": 0.5492924528, "avg_line_length": 31.4074074074, "ext": "tex", "hexsha": "482d1bfac490745be37e43ceb13c8f96441ae88c", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-06-04T14:53:55.000Z", "max_forks_repo_forks_event_min_datetime": "2020-06-04T14:53:55.000Z", "max_forks_repo_head_hexsha": "fcc642a2969e86c532d03254bb4b162e9f23ee01", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ukaea-group-project/Documentation", "max_forks_repo_path": "final_report/tbr_final_report.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "fcc642a2969e86c532d03254bb4b162e9f23ee01", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ukaea-group-project/Documentation", "max_issues_repo_path": "final_report/tbr_final_report.tex", "max_line_length": 109, "max_stars_count": 1, "max_stars_repo_head_hexsha": "fcc642a2969e86c532d03254bb4b162e9f23ee01", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ukaea-group-project/Documentation", "max_stars_repo_path": "final_report/tbr_final_report.tex", "max_stars_repo_stars_event_max_datetime": "2020-06-22T11:25:42.000Z", "max_stars_repo_stars_event_min_datetime": "2020-06-22T11:25:42.000Z", "num_tokens": 964, "size": 4240 }
\subsection{Trees}
{ "alphanum_fraction": 0.7142857143, "avg_line_length": 5.25, "ext": "tex", "hexsha": "b4d354106d0699fded10207cba537897be678bfb", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "adamdboult/nodeHomePage", "max_forks_repo_path": "src/pug/theory/computer/objects/06-01-Trees.tex", "max_issues_count": 6, "max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "adamdboult/nodeHomePage", "max_issues_repo_path": "src/pug/theory/computer/objects/06-01-Trees.tex", "max_line_length": 18, "max_stars_count": null, "max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "adamdboult/nodeHomePage", "max_stars_repo_path": "src/pug/theory/computer/objects/06-01-Trees.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 6, "size": 21 }
\section{Definition of vector spaces} \label{sec:definition-vector-spaces} \begin{outcome} \begin{enumerate} \item Develop the concept of a vector space through axioms. \item Use the vector space axioms to determine if a set and its operations constitute a vector space. \item Encounter several examples of vector spaces. \end{enumerate} \end{outcome} \begin{definition}{Vector space}{vector-space} Let $K$ be a field. A \textbf{vector space} over $K$% \index{vector space} is a set $V$ equipped with two operations of \textbf{addition}% \index{addition!in a vector space}% \index{vector!addition}% \index{addition!of vectors} and \textbf{scalar multiplication}% \index{scalar multiplication!in a vector space}% \index{vector!scalar multiplication}% \index{scalar multiplication!of a vector}% \index{multiplication!scalar multiplication|see{scalar multiplication}}, such that the following properties hold: \begin{itemize}\setlength\itemsep{0em} \item[(A1)] Commutative law of addition% \index{commutative law!of addition}% \index{vector!commutative law of addition}: $\vect{u} + \vect{v}=\vect{v} + \vect{u}$. \item[(A2)] Associative law of addition% \index{associative law!of addition}% \index{vector!associative law of addition}: $(\vect{u} + \vect{v}) + \vect{w}=\vect{u} + (\vect{v} + \vect{w})$. \item[(A3)] The existence of an additive unit% \index{additive unit}% \index{unit!of addition|see{additive unit}}% \index{vector!additive unit}: there exists an element $\vect{0}\in V$ such that for all $\vect{u}$, $\vect{u} + \vect{0}=\vect{u}$. \item[(A4)] The law of additive inverses% \index{additive inverse}% \index{vector!additive inverse}% \index{inverse!additive}: $\vect{u} + (-\vect{u}) =\vect{0}$. \item[(SM1)] The distributive law over vector addition% \index{distributive law!over vector addition}% \index{vector!distributive law}: $k(\vect{u} + \vect{v}) = k\vect{u} + k\vect{v}$. \item[(SM2)] The distributive law over scalar addition% \index{distributive law!over scalar addition}: $(k + \ell) \vect{u} = k \vect{u} + \ell\vect{u}$. \item[(SM3)] The associative law for scalar multiplication% \index{associative law!of scalar multiplication}% \index{vector!associative law of scalar multiplication}: $k(\ell\vect{u}) = (k \ell)\vect{u}$. \item[(SM4)] The rule for multiplication by one% \index{rule for multiplication by 1}% \index{vector!rule for multiplication by 1}: $1\vect{u}=\vect{u}$. \end{itemize} \end{definition} The above definition is concerned about two operations: vector addition, denoted by $\vect{v} + \vect{w}$, and scalar multiplication, denoted by $k\vect{v}$ or sometimes $k\cdot\vect{v}$. In the law of additive inverses, we have written $-\vect{u}$ for $(-1)\vect{u}$. Often, the scalars will be real numbers, but it is also possible to use scalars from a different field $K$. We also use the term \textbf{$K$-vector space}% \index{K-vector space@$K$-vector space} to refer to a vector space over a field $K$. When $K=\R$, we also speak of a \textbf{real vector space}% \index{vector space!real}% \index{real vector space}, and when $K=\C$, we speak of a \textbf{complex vector space}% \index{vector space!complex}% \index{complex vector space}. If the field is clear from the context, we often don't mention it at all, and just speak of a ``vector space''. The elements of a vector space are called \textbf{vectors}% \index{vector!in a vector space}. Our first example of a vector space is of course $\R^n$. 
\begin{example}{$\R^n$ is a vector space}{Rn-vector-space} The set $\R^n$ of $n$-dimensional real column vectors, with the usual operations of vector addition and scalar multiplication, is a vector space. More generally, if $K$ is a field, the set $K^n$ of $n$-dimensional column vectors with components in $K$ is a $K$-vector space. \end{example} \begin{proof} Properties (A1)--(A4) hold by Proposition~\ref{prop:properties-vector-addition}, and properties (SM1)--(SM4) hold by Proposition~\ref{prop:vector-scalar-multiplication}. \end{proof} We now consider some other examples of vector spaces. \begin{example}{Vector space of polynomials of degree 2}{vector-space-polynomials2} Let $\Poly_2$% \index{P2@$\Poly_2$}% \index{vector space!of polynomials} be the set of all polynomials% \index{polynomial} of degree at most $2$ with coefficients from a field $K$, i.e., expressions of the form \begin{equation*} p(x) = ax^2 + bx + c, \end{equation*} where $a,b,c\in K$. Define addition% \index{polynomial!addition}% \index{addition!of polynomials} and scalar multiplication% \index{polynomial!scalar multiplication}% \index{scalar multiplication!of polynomials} of polynomials in the usual way, i.e., \begin{eqnarray*} (ax^2 + bx + c) + (a'x^2 + b'x + c') &=& (a + a')x^2 + (b + b')x + (c + c') \\ k(ax^2 + bx + c) &=& ka\,x^2 + kb\,x + kc. \end{eqnarray*} Then $\Poly_2$ is a vector space. \end{example} \begin{proof} To show that $\Poly_2$ is a vector space, we verify the $8$ vector space axioms. Let \begin{eqnarray*} p(x) &=& a_2x^2 + a_1x + a_0, \\ q(x) &=& b_2x^2 + b_1x + b_0, \\ r(x) &=& c_2x^2 + c_1x + c_0 \end{eqnarray*} be polynomials in $\Poly_2$ and let $k,\ell$ be scalars. \begin{itemize} \item[(A1)] We prove the commutative law of addition. \begin{eqnarray*} p(x) + q(x) &=& (a_2x^2 + a_1x + a_0) + (b_2x^2 + b_1x + b_0) \\ &=& (a_2 + b_2)x^2 + (a_1 + b_1)x + (a_0 + b_0) \\ &=& (b_2 + a_2)x^2 + (b_1 + a_1)x + (b_0 + a_0) \\ &=& (b_2x^2 + b_1x + b_0) + (a_2x^2 + a_1x + a_0) \\ &=& q(x) + p(x). \end{eqnarray*} \item[(A2)] We prove the associative law of addition. \begin{eqnarray*} (p(x) + q(x)) + r(x) &=& ((a_2x^2 + a_1x + a_0) + (b_2x^2 + b_1x + b_0)) + (c_2x^2 + c_1x + c_0) \\ &=& ((a_2 + b_2)x^2 + (a_1 + b_1)x + (a_0 + b_0)) + (c_2x^2 + c_1x + c_0) \\ &=& ((a_2 + b_2) + c_2)x^2 + ((a_1 + b_1) + c_1)x + ((a_0 + b_0) + c_0) \\ &=& (a_2 + (b_2 + c_2))x^2 + (a_1 + (b_1 + c_1))x + (a_0 + (b_0 + c_0)) \\ &=& (a_2x^2 + a_1x + a_0) + ((b_2 + c_2)x^2 + (b_1 + c_1)x + (b_0 + c_0)) \\ &=& (a_2x^2 + a_1x + a_0) + ((b_2x^2 + b_1x + b_0) + (c_2x^2 + c_1x + c_0)) \\ &=& p(x) + (q(x) + r(x)). \end{eqnarray*} \item[(A3)] To prove the existence of an additive unit, let $0(x) = 0x^2 + 0x + 0$, the so-called \textbf{zero polynomial}% \index{polynomial!zero}% \index{zero polynomial}. Then \begin{eqnarray*} p(x) + 0(x) &=& (a_2x^2 + a_1x + a_0) + (0x^2 + 0x + 0) \\ &=& (a_2 + 0)x^2 + (a_1 + 0)x + (a_0 + 0) \\ &=& a_2x^2 + a_1x + a_0 \\ &=& p(x). \end{eqnarray*} \item[(A4)] We prove the law of additive inverses. \begin{eqnarray*} p(x) + (-p(x)) &=& (a_2x^2 + a_1x + a_0) + (- a_2x^2 - a_1x - a_0) \\ &=& (a_2 - a_2)x^2 + (a_1 - a_1)x + (a_0 - a_0) \\ &=& 0x^2 + 0x + 0 \\ &=& 0(x). \end{eqnarray*} \item[(SM1)] We prove the distributive law over vector addition. 
\begin{eqnarray*} k(p(x) + q(x)) &=& k ((a_2x^2 + a_1x + a_0) + (b_2x^2 + b_1x + b_0)) \\ &=& k ((a_2 + b_2)x^2 + (a_1 + b_1)x + (a_0 + b_0)) \\ &=& k(a_2 + b_2)x^2 + k(a_1 + b_1)x + k(a_0 + b_0) \\ &=& (ka_2 + kb_2)x^2 + (ka_1 + kb_1)x + (ka_0 + kb_0) \\ &=& (ka_2x^2 + ka_1x + ka_0) + (kb_2x^2 + kb_1x + kb_0) \\ &=& kp(x) + kq(x). \end{eqnarray*} \item[(SM2)] We prove the distributive law over scalar addition. \begin{eqnarray*} (k + \ell) p(x) &=& (k + \ell) (a_2x^2 + a_1x + a_0) \\ &=& (k + \ell)a_2x^2 + (k + \ell)a_1x + (k + \ell)a_0 \\ &=& (ka_2x^2 + ka_1x + ka_0) + (\ell a_2x^2 + \ell a_1x + \ell a_0) \\ &=& kp(x) + \ell p(x). \end{eqnarray*} \item[(SM3)] We prove the associative law for scalar multiplication. \begin{eqnarray*} k(\ell p(x)) &=& k(\ell(a_2x^2 + a_1x + a_0)) \\ &=& k(\ell a_2x^2 + \ell a_1x + \ell a_0) \\ &=& k\ell a_2x^2 + k\ell a_1x + k\ell a_0 \\ &=& (k\ell) (a_2x^2 + a_1x + a_0) \\ &=& (k\ell) p(x). \end{eqnarray*} \item[(SM4)] Finally, we prove the rule for multiplication by one. \begin{eqnarray*} 1p(x) &=& 1 (a_2x^2 + a_1x + a_0) \\ &=& 1a_2x^2 + 1a_1x + 1a_0 \\ &=& a_2x^2 + a_1x + a_0 \\ &=& p(x). \end{eqnarray*} \end{itemize} Since the operations of addition and scalar multiplication on $\Poly_2$ satisfy the $8$ vector space axioms, $\Poly_2$ is a vector space. \end{proof} Our next example of a vector space is the set of all $n\times m$-matrices. \begin{example}{Vector space of matrices}{vector-space-matrices} Let $\Mat_{m,n}$% \index{Mmn@$\Mat_{m,n}$} be the set of all $m\times n$-matrices with entries in a field $K$% \index{vector space!of matrices}% \index{matrix!vector space of}, together with the usual operations of matrix addition and scalar multiplication. Then $\Mat_{m,n}$ is a vector space. \end{example} \begin{proof} The properties (A1)--(A4) hold by Proposition~\ref{prop:properties-of-addition}, and the properties (SM1)--(SM4) hold by Proposition~\ref{prop:properties-scalar-multiplication}. \end{proof} We now examine an example of a set that does not satisfy all of the above axioms, and is therefore \textit{not} a vector space. \begin{example}{Not a vector space}{not-vector-space} Let $V$ denote the set of $2 \times 3$-matrices. Let us define a non-standard addition in $V$ by $A \oplus B = A$ for all matrices $A,B\in V$. Let scalar multiplication in $V$ be the usual scalar multiplication of matrices. Show that $V$ is not a vector space. \end{example} \begin{solution} In order to show that $V$ is not a vector space, it suffices to find one of the 8 axioms that is not satisfied. We will begin by examining the axioms for addition until one is found which does not hold. In fact, for this example, the very first axiom fails. Let \begin{equation*} A = \begin{mymatrix}{rrr} 1 & 0 & 0 \\ 0 & 0 & 0 \end{mymatrix}, \quad B = \begin{mymatrix}{rrr} 0 & 0 & 0 \\ 1 & 0 & 0 \end{mymatrix}. \end{equation*} Then $A\oplus B=A$ and $B\oplus A=B$. Since $A\neq B$, we have $A\oplus B\neq B\oplus A$ for these two matrices, so property (A1) is false. \end{solution} Our next example looks a little different. \begin{example}{Vector space of functions}{vector-space-function} Let $X$ be a nonempty set, $K$ a field, and define $\Func_{X,K}$% \index{FuncXK@$\Func_{X,K}$} to be the set of functions \index{function!vector space of} defined on $X$ and valued in $K$. In other words, the elements of $\Func_{X,K}$ are functions $f:X\to K$. 
The sum of two functions is defined by \begin{equation*} (f + g)(x) = f(x) + g(x), \end{equation*} and the scalar multiplication is defined by \begin{equation*} (kf) (x) = k(f(x)). \end{equation*} Then $\Func_{X,K}$ is a vector space. \end{example} \begin{proof} To verify that $\Func_{X,K}$ is a vector space, we must prove the 8 axioms of vector spaces. Let $f, g, h$ be functions in $\Func_{X,K}$, and let $k,\ell$ be scalars. Recall that two functions $f,g$ are \textbf{equal}% \index{function!equality of}% \index{equality of functions} if for all $x\in X$, we have $f(x)=g(x)$. \begin{itemize} \item[(A1)] We prove the commutative law of addition. For all $x\in X$, we have \begin{equation*} (f + g) (x) ~=~ f(x) + g(x) ~=~ g(x) + f(x) ~=~ (g + f) (x). \end{equation*} Therefore, $f + g = g + f$. \item[(A2)] We prove the associative law of addition. For all $x\in X$, we have \begin{equation*} ((f + g) + h) (x) ~=~ (f + g) (x) + h(x) ~=~ (f(x) + g(x)) + h(x) \end{equation*} \begin{equation*} ~=~ f(x) + (g(x) + h(x)) ~=~ (f(x) + (g + h) (x)) ~=~ (f + (g + h)) (x). \end{equation*} Therefore, $(f + g) + h = f + (g + h)$. \item[(A3)] To prove the existence of an additive unit, let $0$ denote the function that is given by $0(x)=0$. This is called the \textbf{zero function}% \index{function!zero function}% \index{zero function}. It is an additive unit because for all $x$, \begin{equation*} (f + 0) (x) ~=~ f(x) + 0(x) ~=~ f(x), \end{equation*} and so $f+0 = f$. \item[(A4)] We prove the law of additive inverses. Let $-f = (-1)f$ be the function that satisfies $(-f) (x) = -f(x)$. Then for all $x$, \begin{equation*} (f + (-f)) (x) ~=~ f(x) + (-f) (x) ~=~ f(x) + -f(x) ~=~ 0. \end{equation*} Therefore $f + (-f) = 0$. \item[(SM1)] We prove the distributive law over vector addition. For all $x$, we have \begin{equation*} (k(f + g)) (x) ~=~ k(f + g) (x) ~=~ k(f(x) + g(x)) \end{equation*} \begin{equation*} ~=~ kf(x) + k g(x) ~=~ (kf + kg) (x), \end{equation*} and so $k(f + g) = kf + kg$. \item[(SM2)] We prove the distributive law over scalar addition. \begin{equation*} ((k + \ell) f) (x) ~=~ (k + \ell) f(x) ~=~ kf(x) + \ell f(x) ~=~ (kf + \ell f) (x), \end{equation*} and so $(k + \ell) f = kf + \ell f$. \item[(SM3)] We prove the associative law for scalar multiplication. \begin{equation*} ((k\ell ) f) (x) ~=~ (k\ell) f(x) ~=~ k(\ell f(x)) ~=~ (k(\ell f)) (x), \end{equation*} so $(k\ell f) =k(\ell f)$. \item[(SM4)] Finally, we prove the rule for multiplication by one. For all $x\in X$, we have \begin{equation*} (1f) ( x) ~=~ 1f(x) ~=~f(x), \end{equation*} and therefore $1f=f$. \end{itemize} It follows that $\Func_{X,K}$ satisfies all the required axioms and is a vector space. \end{proof} For the next two examples of vector spaces, we leave the proofs as an exercise. \begin{example}{Infinite sequences}{vector-space-sequences} Let $K$ be a field. A \textbf{sequence}% \index{sequence} of elements of $K$ is an infinite list \begin{equation*} (a_0,\,a_1,\,a_2,\,a_3\, \ldots), \end{equation*} where $a_i\in K$ for all $i$. We also use the notation $(a_i)_{i\in\N}$, or occasionally $(a_i)$, to denote such a sequence. Let $\Seq_K$ be the set of sequences% \index{vector space!of sequences}% \index{SeqK@$\Seq_K$} of elements of $K$. We add two sequences by adding their $i\th$ elements: \begin{equation*} (a_i)_{i\in\N} + (b_i)_{i\in\N} = (a_i+b_i)_{i\in\N}. \end{equation*} We scale a sequence by scaling each of its elements: \begin{equation*} k(a_i)_{i\in\N} = (ka_i)_{i\in\N}. 
\end{equation*} Then $\Seq_K$ is a vector space. \end{example} \begin{example}{Vector space of polynomials of unbounded degree}{vector-space-polynomials} Let $K$ be a field, and let $\Poly$% \index{P@$\Poly$}% \index{vector space!of polynomials} be the set of all polynomials% \index{polynomial} (of any degree) with coefficients from $K$, i.e., expressions of the form \begin{equation*} p(x) = a_nx^n + a_{n-1}x^{n-1} + \ldots + a_1x + a_0, \end{equation*} where $n\geq 0$ and $a_0,\ldots,a_n\in K$. Addition and scalar multiplication of polynomials are defined in the usual way \index{polynomial!addition}% \index{addition!of polynomials}% \index{polynomial!scalar multiplication}% \index{scalar multiplication!of polynomials}. Then $\Poly$ is a vector space. \end{example} We conclude this section by deriving some initial consequences of the vector space axioms. \begin{proposition}{Elementary consequences of the vector space axioms}{vector-space-elementary} In any vector space, the following are true: \begin{enumialphparenastyle} \begin{enumerate} \item The additive unit is unique. In other words, whenever $\vect{u}+\vect{v}=\vect{u}$, then $\vect{v}=\vect{0}$. \item Additive inverses are unique. In other words, whenever $\vect{u}+\vect{v}=\vect{0}$, then $\vect{v}=-\vect{u}$. \item\label{vector-space-elementary-c} $0\vect{u}=\vect{0}$ for all vectors $\vect{u}$. \item The following \textbf{cancellation law}% \index{cancellation law!of addition}% \index{vector!cancellation law} holds: if $\vect{u} + \vect{w} = \vect{v} + \vect{w}$, then $\vect{u} = \vect{v}$. \end{enumerate} \end{enumialphparenastyle} \end{proposition} \begin{proof} We prove the first three properties, and leave the last one as an exercise. Assume $V$ is any vector space over a field $K$. \begin{enumialphparenastyle} \begin{enumerate} \item Consider arbitrary vectors $\vect{u},\vect{v}\in V$ and assume \begin{equation*} \vect{u}+\vect{v}=\vect{u}. \end{equation*} Applying the law (A1) (commutative law) to the left-hand side, we have \begin{equation*} \vect{v}+\vect{u}=\vect{u}. \end{equation*} Adding $-\vect{u}$ to both sides of the equation, we have \begin{equation*} (\vect{v}+\vect{u})+(-\vect{u}) = \vect{u}+(-\vect{u}). \end{equation*} Applying the law (A2) (associative law) to the left-hand side, we have \begin{equation*} \vect{v}+(\vect{u}+(-\vect{u})) = \vect{u}+(-\vect{u}). \end{equation*} Applying the law (A4) (additive inverse law) to both sides of the equation, we have \begin{equation*} \vect{v}+\vect{0} = \vect{0}. \end{equation*} Applying the law (A3) (additive unit law) to the left-hand side, we have \begin{equation*} \vect{v} = \vect{0}. \end{equation*} This proves that whenever $\vect{u}+\vect{v}=\vect{u}$, then $\vect{v} = \vect{0}$, or in other words, $\vect{v} = \vect{0}$ is the only element acting as an additive unit. \item Consider arbitrary vectors $\vect{u},\vect{v}\in V$ and assume \begin{equation*} \vect{u}+\vect{v}=\vect{0}. \end{equation*} Applying the law (A1) (commutative law) to the left-hand side, we have \begin{equation*} \vect{v}+\vect{u}=\vect{0}. \end{equation*} Adding $-\vect{u}$ to both sides of the equation, we have \begin{equation*} (\vect{v}+\vect{u})+(-\vect{u}) = \vect{0}+(-\vect{u}). \end{equation*} Applying the law (A2) (associative law) to the left-hand side, we have \begin{equation*} \vect{v}+(\vect{u}+(-\vect{u})) = \vect{0}+(-\vect{u}). 
\end{equation*} Applying the law (A4) (additive inverse law) to the left-hand side, we have \begin{equation*} \vect{v}+\vect{0} = \vect{0}+(-\vect{u}). \end{equation*} Applying the law (A1) (commutative law) to the right-hand side, we have \begin{equation*} \vect{v}+\vect{0} = -\vect{u} + \vect{0}. \end{equation*} Applying the law (A3) (additive unit law) to both sides of the equation, we have \begin{equation*} \vect{v} = -\vect{u}. \end{equation*} This proves that whenever $\vect{u}+\vect{v}=\vect{0}$, then $\vect{v} = -\vect{u}$, or in other words, $\vect{v} = -\vect{u}$ is the only element acting as an additive inverse of $\vect{u}$. \item First, note that the scalar $0\in K$ satisfies the property $0+0=0$, by property (A3) of the definition of a field. Now let $\vect{u}\in V$ be any vector. Using the vector space law (SM2) (distributive law over scalar addition) and $0+0=0$, we have \begin{equation*} 0\vect{u}+0\vect{u} = (0+0) \vect{u} = 0\vect{u}. \end{equation*} Next, we use a small trick: add $-(0\vect{u})$ to both sides of the equation. This gives \begin{eqnarray*} (0\vect{u}+0\vect{u})+(-(0\vect{u})) &=& 0\vect{u} + (-(0\vect{u})). \end{eqnarray*} Applying the additional laws (A2), (A4), and (A3), we have \begin{eqnarray*} 0\vect{u}+(0\vect{u}+(-(0\vect{u}))) &=& 0\vect{u} + (-(0\vect{u})), \\ 0\vect{u} + \vect{0} &=& \vect{0}, \\ 0\vect{u} &=& \vect{0}. \end{eqnarray*} This proves that $0\vect{u} = \vect{0}$ holds for all vectors $\vect{u}$, as desired. \item This is left as an exercise. \end{enumerate} \end{enumialphparenastyle} \end{proof}
{ "alphanum_fraction": 0.5940335972, "avg_line_length": 38.9398496241, "ext": "tex", "hexsha": "9778ec4a663cf711888b1eba589aea40cb89098b", "lang": "TeX", "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2021-06-30T16:23:12.000Z", "max_forks_repo_forks_event_min_datetime": "2020-11-09T11:12:03.000Z", "max_forks_repo_head_hexsha": "37ad955fd37bdbc6a9e855c3794e92eaaa2d8c02", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "selinger/linear-algebra", "max_forks_repo_path": "baseText/content/VectorSpaces-Definition.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "37ad955fd37bdbc6a9e855c3794e92eaaa2d8c02", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "selinger/linear-algebra", "max_issues_repo_path": "baseText/content/VectorSpaces-Definition.tex", "max_line_length": 96, "max_stars_count": 3, "max_stars_repo_head_hexsha": "37ad955fd37bdbc6a9e855c3794e92eaaa2d8c02", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "selinger/linear-algebra", "max_stars_repo_path": "baseText/content/VectorSpaces-Definition.tex", "max_stars_repo_stars_event_max_datetime": "2021-06-30T16:23:10.000Z", "max_stars_repo_stars_event_min_datetime": "2019-03-21T06:37:13.000Z", "num_tokens": 7523, "size": 20716 }
\documentclass[11pt,letterpaper]{article}
\usepackage{html}
\usepackage{charter}
\pagestyle{empty}
%
% GET THE MARGINS RIGHT, THE UGLY WAY
%
\topmargin 0.0in
\textwidth 6.5in
\textheight 9.0in
\columnsep 0.25in
\oddsidemargin 0.0in
\evensidemargin 0.0in
\headsep 0.0in
\headheight 0.0in

\title{Frequently Asked Questions about PVFS}
\author{ PVFS Development Team }
% \date{Last Updated: September 2004}

%
% BEGINNING OF DOCUMENT
%
\begin{document}
\maketitle
\tableofcontents
\thispagestyle{empty}

%
% BASICS
%
\section{Basics}

This section covers some basic questions for people who are unfamiliar with PVFS.

\subsection{What is PVFS?}

PVFS is an open-source, scalable parallel file system targeted at production parallel computation environments. It is designed specifically to scale to very large numbers of clients and servers. The architecture is very modular, allowing for easy inclusion of new hardware support and new algorithms. This makes PVFS a perfect research testbed as well.

\subsection{What is the history of PVFS?}

PVFS was first developed at Clemson University in 1993 by Walt Ligon and Eric Blumer as a parallel file system for Parallel Virtual Machine (PVM). It was developed as part of a NASA grant to study the I/O patterns of parallel programs. PVFS version 0 was based on Vesta, a parallel file system developed at IBM T. J. Watson Research Center. Starting in 1994 Rob Ross re-wrote PVFS to use TCP/IP and departed from many of the original Vesta design points. PVFS version 1 was targeted to a cluster of DEC Alpha workstations networked using switched FDDI. Like Vesta, PVFS striped data across multiple servers and allowed I/O requests based on a file view that described a strided access pattern. Unlike Vesta, the striping and view were not dependent on a common record size.

Ross' research focused on scheduling of disk I/O when multiple clients were accessing the same file. Previous results had shown that scheduling according to the best possible disk access pattern was preferable. Ross showed that this depended on a number of factors, including the relative speed of the network and the details of the file view. In some cases a schedule based on network traffic was preferable; thus a dynamically adaptable schedule provided the best overall performance.

In late 1994 Ligon met with Thomas Sterling and John Dorband at Goddard Space Flight Center (GSFC) and discussed their plans to build the first Beowulf computer. It was agreed that PVFS would be ported to Linux and be featured on the new machine. Over the next several years Ligon and Ross worked with the GSFC group, including Donald Becker, Dan Ridge, and Eric Hendricks. In 1997, at a cluster meeting in Pasadena, CA, Sterling asked that PVFS be released as an open source package.

In 1999 Ligon proposed the development of a new version of PVFS, initially dubbed PVFS2000 and later PVFS2. The design was initially developed by Ligon, Ross, and Phil Carns. Ross completed his PhD in 2000 and moved to Argonne National Laboratory. The design and implementation were carried out by Ligon, Carns, Dale Witchurch, and Harish Ramachandran at Clemson University; Ross, Neil Miller, and Rob Latham at Argonne National Laboratory; and Pete Wyckoff at the Ohio Supercomputer Center. The new file system was released in 2003. The new design featured object servers, distributed metadata, views based on MPI, support for multiple network types, and a software architecture for easy experimentation and extensibility. PVFS version 1 was retired in 2005.
PVFS version 2 is still supported by Clemson and Argonne. Carns completed his PhD in 2006 and joined Axicom, Inc., where PVFS was deployed on several thousand nodes for data mining. In 2008 Carns moved to Argonne and continues to work on PVFS along with Ross, Latham, and Sam Lang. Brad Settlemyer developed a mirroring subsystem at Clemson, and later a detailed simulation of PVFS used for researching new developments. Settlemyer is now at Oak Ridge National Laboratory. In 2007 Argonne began porting PVFS for use on an IBM Blue Gene/P. In 2008 Clemson began developing extensions for supporting large directories of small files, security enhancements, and redundancy capabilities. As many of these goals conflicted with development for Blue Gene, a second branch of the CVS source tree was created and dubbed ``Orange'', and the original branch was dubbed ``Blue''. PVFS and OrangeFS have tracked each other very closely, but represent two different groups of user requirements.

\subsection{What is OrangeFS?}

Simply put, OrangeFS is PVFS. OrangeFS is a branch of PVFS created by the Clemson PVFS developers to investigate new features and implementations of PVFS. As of fall 2010 OrangeFS has become the main branch of PVFS. So why the name change? PVFS was originally conceived as a research parallel file system and later developed for production on large high-performance machines such as the BG/P at Argonne National Lab. OrangeFS takes a slightly different approach, supporting a broader range of large and medium systems and addressing a number of issues PVFS was not concerned with, including security, redundancy, and a broader range of applications. The new name reflects this new focus, but for now at least, OrangeFS is PVFS. The PVFS web site is still maintained. The PVFS mailing lists for users and developers have not changed and will be used for OrangeFS. At some point in the future another group may decide to branch from the main tree, but the PVFS site will remain the home of the community.

\subsection{What is Omnibond?}

Omnibond is a software company that for years has worked with Clemson University to market software developed at the university. As of fall 2010 Omnibond is offering commercial support for OrangeFS/PVFS. OrangeFS is open source and will always be free, and the code, as always, is developed and maintained by the PVFS community. Omnibond offers professional services to those who are interested in them, and directly supports the PVFS community. Omnibond offers its customers the option of dedicated support services and the opportunity to support the development of new features that they feel are critical. Omnibond gives back to the community through its support and development.

\subsection{What does the ``V'' in PVFS stand for?}

The ``V'' in PVFS stands for virtual. This is a holdover from the original (PVFS1) project, which built a parallel file system on top of local file systems, which we still do now. It isn't meant to imply virtualization of storage, although that is sort of what the file system does.

\subsection{Is PVFS an attempt to parallelize the *NIX VFS?}

No, and we're not even sure what that means! The design of PVFS does not depend on the design of the traditional *NIX Virtual Filesystem Switch (VFS) layer, although we provide a compatibility layer that allows access to the file system through it.
\subsection{What are the components of PVFS that I should know about?}

The PVFS Guide (\url{http://www.pvfs.org/pvfs2-guide.html}) has more information on all of these components, plus a discussion of the system as a whole, the code tree, and more.

\subsection{What is the format of the PVFS version string?}
\label{sec:version-string}

PVFS uses a three-number version string: X.Y.Z. The first number (X) represents the high-level design version of PVFS. The current design version is 2, and will likely remain there. The second number (Y) refers to the major version of the release. Major versions are incremented with new features, protocol changes, public API changes, and storage format changes. The third number (Z) refers to the minor version of the release, and is incremented primarily for bug fix releases.

With our 2.6.0 release, we changed the release version and name from PVFS2 1.x.x to PVFS 2.x.x. Users who are familiar with `PVFS2' and have been using PVFS2 1.5.1 will find the same software in PVFS version 2.6.0 or later (with updates and new features, of course). Users of PVFS version 1 can still go to \url{http://www.parl.clemson.edu/pvfs}, although we highly encourage you to upgrade to PVFS version 2 if you are still using version 1.

%
% SUPPORTED ARCHITECTURES
%
\section{Supported Architectures and Hardware}

This section covers questions related to particular system architectures, operating systems, and other hardware.

\subsection{Does PVFS require any particular hardware?}

Other than hardware supported by the Linux OS, no. PVFS uses existing network infrastructure for communication and can currently operate over TCP, Myrinet, and InfiniBand. Disk local to servers is used for PVFS storage, so no storage area network (SAN) is required either (although it can be helpful when setting up fault-tolerant solutions; see Section~\ref{sec:fault-tolerance}).

\subsection{What architectures does PVFS support?}
\label{sec:supported-architectures}

The majority of PVFS is POSIX-compliant C code that runs in user space. As such, much of PVFS can run on most available systems. See Question~\ref{sec:supported-hw} for more information on particular hardware. The (optional) part of PVFS that hooks into the operating system on clients must be written specifically for the particular operating system. Question~\ref{sec:kernel-version} covers this issue.

\subsection{Does PVFS work across heterogeneous architectures?}

Yes! The ``language'' that PVFS uses to talk between clients and servers is encoded in an architecture-independent format (little-endian with fixed byte-length parameters). This allows different PVFS components to interact seamlessly regardless of architecture.

\subsection{Does running PVFS require a particular kernel or kernel version?}
\label{sec:kernel-version}

You can run the userspace PVFS servers and administration tools on every major GNU/Linux distribution out of the box, and we intend to keep it that way.
%
However, the kernel module that allows client access to the PVFS system does depend on particular kernel versions because it builds against the running one (in the same manner as every other Linux module). The kernel-dependent PVFS client support has been written for Linux kernel versions 2.4.19 (and greater) and 2.6.0 (and greater). At this time only Linux clients have this level of support.
\subsection{What specific hardware architectures are supported by the PVFS kernel module?} \label{sec:supported-hw} To our knowledge, PVFS has been verified to be working on x86/IA-32, IA-64, AMD64, PowerPC (ppc), and Alpha based GNU/Linux distributions. \subsection{Does the PVFS client require a patched Linux kernel?} No. The kernel module source included with PVFS is generally targeted toward the official ``Linus'' kernels (found at kernel.org). Patches for the PVFS kernel module code may be provided for major distributions that have modified their kernel to be incompatible with the officially released kernels. The best place to find out more information about support for a kernel tied to a particular distribution is on the PVFS2-developers mailing list. \subsection{Can I build the PVFS kernel code directly into the kernel, rather than as a module?} No, this is currently not supported nor recommended. \subsection{Is there a MacOS X/Cygwin/Windows client for PVFS?} At this time we have no plans for porting the code to operating systems other than Linux. However, we do encourage porting efforts of PVFS to other operating systems, and will likely aid in the development. % % INSTALLATION % \section{Installation} This section covers issues related to installing and configuring PVFS. \subsection{How do I install PVFS?} The PVFS Quick Start Guide (\url{http://www.pvfs.org/pvfs2/pvfs2-quickstart.html}) provides an overview of both a simple, single-server installation, and a more complicated, multi-server configuration. \subsection{How can I store PVFS data on multiple disks on a single node?} \label{sec:multiple-disks} There are at least two ways to do this. In general the best solution to this problem is going to be to get the disks logically organized into a single unit by some other OS component, then build a file system on that single logical unit for use by the PVFS server on that node. There are a wide array of hardware RAID controllers that are capable of performing this task. % The Multiple Devices (MD) driver is a software component of Linux that can be used to combine multiple disk drives into a single logical unit, complete with RAID for fault tolerance. % Using the Logical Volume Management (LVM) component of the Linux OS is another option for this (see the HOWTO at \url{http://www.tldp.org/HOWTO/LVM-HOWTO.html}). LVM would also allow you to add or remove drives at a later time, which can be quite convenient. You can of course combine the MD and LVM components in interesting ways as well, but that's outside the scope of this FAQ. % There's an EVMS program that can be used for managing local storage; this might be useful for setting up complicated configurations of local storage prior to starting up PVFS servers. A second solution would be to use more than one server on the same node, each using a different file system to store its data. This might lead to resource contention issues, so we suggest trying other options first. \subsection{How can I run multiple PVFS servers on the same node?} If you do decide to run more than one PVFS server on the same node, setting things up is as simple as setting up servers on different nodes. Each will need its own entry in the list of Aliases and its own server-specific configuration file, as described in the Quick Start (\url{http://www.pvfs.org/pvfs2/pvfs2-quickstart.html}). \subsection{Can I use multiple metadata servers in PVFS?} Absolutely! Any PVFS server can store either metadata, data, or both. 
Simply allocate unique MetaHandleRanges for each server on which you would
like to store metadata; the clients will handle the rest.

\subsection{Does using multiple metadata servers reduce the chance of file
system corruption during hardware failures?}

Unfortunately, no. While using multiple metadata servers distributes
metadata, it does not replicate or store redundant information across these
servers. For information on better ways to handle failures, see
Section~\ref{sec:fault-tolerance}.

\subsection{How many servers should I run?}
\label{sec:howmany-servers}

Really, the answer is ``it depends'', but here are some factors you should
take into account.

Running multiple metadata servers might help if you expect to have a lot of
small files. The metadata servers are not involved in data access (file
contents) but do have a role in file creation and lookup. Multiple clients
accessing different files will likely access different metadata servers, so
you could see a load balancing effect.

A good rule of thumb is that you should run as many data servers as
possible. One common configuration is to have some nodes with very
high-performance disks acting as servers to the larger cluster. As you use
more servers in this configuration, the theoretical peak performance of
PVFS increases. The clients, however, have to make very large requests in
order to stripe the I/O across all the servers. If your clients will never
write large files, use a smaller number of servers. If your clients are
writing out gigantic checkpoint files or reading in huge datasets, then use
more servers.

It is entirely possible to run PVFS servers on the same nodes that do
computation. In most cases, however, you will see better performance if you
have some portion of your cluster dedicated to I/O and another portion
dedicated to computation.

\subsection{Can PVFS servers listen on two network interfaces
simultaneously (i.e., multihome)?}

Yes! PVFS servers can listen on more than one interface at a time.
Multihome support was added shortly before the PVFS2 1.0 release.

\subsection{How can I automount PVFS volumes?}

The Linux automounter needs some help dealing with PVFS's resource strings.
A typical mount command (on Linux 2.6) would look like this:
\begin{verbatim}
mount -t pvfs2 tcp://server0:3334/pvfs2-fs /mnt/pvfs2
\end{verbatim}
The entry in the automount config file should look like this:
\begin{verbatim}
pvfs -fstype=pvfs2 tcp://server0\:3334/pvfs2-fs
\end{verbatim}
Note the backslash-escape of the colon before the port number. Without that
escape, the automounter will get confused and replace \texttt{'tcp://'}
with \texttt{'tcp:///'}.

\subsection{Can I mount more than one PVFS file system on the same client?}
\label{sec:multiple-mounts}

Yes. However, when setting up the two file systems it is important that
both file systems have unique \texttt{Name} and \texttt{ID} values (in the
file system configuration file). This means that you can't simply make a
copy of the \texttt{fs.conf} generated by \texttt{pvfs2-genconfig}; you
will need to edit the files a bit. This editing needs to be performed
\emph{before} you create the storage spaces!

\subsection{How can I upgrade from PVFS v1 to PVFS v2?}

Hans Reiser summarized the upgrade approach from reiserfs V3 to V4 with the
following:
\begin{quote}
To upgrade from reiserfs V3 to V4, use tar, or sponsor us to write a
convertfs.
\end{quote}
Similarly, there are no tools currently provided by the PVFS team to
upgrade from PVFS1 to PVFS2, so tar is your best bet.
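In practice the upgrade amounts to copying everything from a mounted PVFS1
volume to a mounted PVFS2 volume through the ordinary client interfaces,
which tar does well. For illustration only, an equivalent Python sketch is
shown below; the mount points are hypothetical, and the sketch copies plain
files and directories only (no symlinks or ownership), so tar remains the
more robust choice.
\begin{verbatim}
import os, shutil

# Hypothetical mount points; adjust them to your own installation.
OLD_MOUNT = '/mnt/pvfs1'
NEW_MOUNT = '/mnt/pvfs2'

def copy_tree(src, dst):
    # Walk the old file system and copy every file through the
    # ordinary client interface, preserving the directory layout.
    for root, dirs, files in os.walk(src):
        rel = os.path.relpath(root, src)
        target = os.path.join(dst, rel) if rel != '.' else dst
        if not os.path.isdir(target):
            os.makedirs(target)
        for name in files:
            shutil.copy2(os.path.join(root, name),
                         os.path.join(target, name))

if __name__ == '__main__':
    copy_tree(OLD_MOUNT, NEW_MOUNT)
\end{verbatim}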
% % REPORTING PROBLEMS % \section{Reporting Problems} This section outlines some steps that will help the developers figure out what has happened when you have a problem. \subsection{Where can I find documentation?} The best place to look for documentation on PVFS is the PVFS web site at \url{http://www.pvfs.org/}. Documentation (including this FAQ) is also available in the \texttt{doc} subdirectory of the PVFS source distribution. Please reference \texttt{pvfs2-logging.txt} to understand more about PVFS' informational messages, where the logs exist, and how to turn logging on and off. \subsection{What should I do if I have a problem?} The first thing to do is to check out the existing documentation and see if it addresses your problem. We are constantly updating documentation to clarify sections that users have found confusing and to add to this document answers to questions that we have seen. The next thing to do is to check out the PVFS mailing list archives at \url{http://www.pvfs.org/pvfs2/lists.html}. It is likely that you are not the first person to see a particular problem, so searching this list will often result in an immediate answer. If you still haven't found an answer, the next thing to do is to mail the mailing list and report your problem. If you enjoy using IRC, you can also join us on irc.freenode.net in the \#pvfs2 channel. \subsection{How do I report a problem with PVFS?} First you will need to join the PVFS2 Users Mailing list at \url{http://www.beowulf-underground.org/mailman/listinfo/pvfs2-users}. You must be a user to post to the list; this is necessary to keep down the amount of spam on the list. Next you should gather up some information regarding your system: \begin{itemize} \item Version of PVFS \item Version of MPI and MPI-IO (if you're using them) \item Version of Linux kernel (if you're using the VFS interface) \item Hardware architecture, including CPU, network, storage \item Any logs that might be useful to the developers \end{itemize} Including this information in your first message will help the developers most quickly help you. You are almost guaranteed that if you do not include this information in your first message, you will be asked to provide it in the first reply, slowing down the process. You should be aware that you are also likely to be asked to try the newest stable version if you are not running that version. We understand that this is not always possible, but if it is, please do. \emph{Note:} Please do not send your message to both the PVFS2 Users List and the PVFS2 Developers List; the lists serve different purposes. Also, please do not send your message directly to particular developers. By keeping discussion of problems on the mailing lists we ensure that the discussion is archived and that everyone has a chance to respond. % % Problems and Solutions % \section{Problems and Solutions} This section covers error conditions you might encounter, what they might mean, and how to fix them. \subsection{When I try to mount, I get 'wrong fs type, bad option, bad superblock...'} First, make 100\% sure you typed the mount command correctly. As discussed in the PVFS quickstart, different mount commands are needed for linux-2.4 and linux-2.6. 
A linux-2.6 mount command will look like this: \begin{verbatim} prompt# mount -t pvfs2 tcp://testhost:3334/pvfs2-fs /mnt/pvfs2 \end{verbatim} Under linux-2.4, the mount command looks slightly different: \begin{verbatim} prompt# mount -t pvfs2 pvfs2 /mnt/pvfs2 -o tcp://testhost:3334/pvfs2-fs \end{verbatim} This error could also mean a pvfs2-client process is not running, either because it was not started before the mount command, or was terminated at some point. If you can reliably (or even intermittently) cause the pvfs2-client to exit abnormally, please send a report to the developers. This error can also occur if you attempt to mount a second PVFS file system on a client, where the new file system has the same name or ID as one that is already mounted. If you are trying to mount more than one file system on the same client and have problems, please see question \ref{sec:multiple-mounts}. Finally, be sure there are no typos in your command line, as this is commonly the case! \subsection{PVFS server consumes 100\% of the CPU} \label{sec:server_100pct_cpu} On some systems, the pvfs2-server will start consuming 100\% of the CPU after you try to read or write a file to PVFS. gdb indicates that the server is spending a lot of time in the glibc routine \texttt{'.handle\_kernel\_aio'}. Please check to see if your distribution has an updated glibc package. RHEL3, for example, will exhibit this behavior with glibc-2.3.2-95.6, but not with the updated glibc-2.3.2-95.20 package. We have also seen this behavior on ppc64 systems running glibc-2.3.3-18.ydl.4 . If you encounter this problem and your distribution does not have an updated glibc package, you can configure pvfs2 with \texttt{--disable-aio-threaded-callbacks}, though this will result in a performance hit. An alternate workaround is to set \texttt{LD\_ASSUME\_KERNEL} to 2.4.1 before running pvfs2-server. pvfs2-server will then use an older (and not as optimized) thread library that does not have this bug. At this time we do not know which of the two suggested workarounds is better from a performance standpoint. The \texttt{LD\_ASSUME\_KERNEL} method might make more sense: when/if the system's glibc is upgraded, you will only have to restart pvfs2-server with the environment variable unset. You would not have to rebuild pvfs2 to take advantage of the fix. \subsection{PVFS write performance slows down dramatically} \label{sec:write_slowdown} Phil Carns noticed that on some kernels, write-heavy workloads can trigger a kernel bug. The symptoms are that the PVFS server will only be able to deliver a few KB/s, and the CPU utilization will be close to 100\%. The cause appears to be related to ext3's ``reservation'' code (designed to reduce fragmentation). The solution is to either mount the filesystem with the 'noreservation' option, or upgrade your kernel. For more information, including URLs to several other reports of this issue, see Phil's original post: \url{http://www.beowulf-underground.org/pipermail/pvfs2-developers/2006-March/001885.html} \subsection{I get ``error while loading shared libraries'' when starting PVFS programs} PVFS needs several libraries. If those libraries aren't in the default locations, you might need to add flags when running PVFS's configure script. At configure time you can, for example, pass \texttt{--with-db=/path/to/db --with-gm=/path/to/gm} to compile with Berkeley DB and Myiricom GM libraries. The configure options let the compiler know where to find the libraries at compile time. 
Those compile-time options, however, aren't enough to find the libraries at
run-time. There are two ways to teach the system where to find libraries:
\begin{itemize}
\item add /usr/local/BerkeleyDB.4.3/lib to the /etc/ld.so.conf config file
  and re-run 'ldconfig', or
\item add /usr/local/BerkeleyDB.4.3/lib to the \texttt{LD\_LIBRARY\_PATH}
  environment variable.
\end{itemize}
We suggest the ld.so.conf approach, since that will work for all users on
your system.

\subsection{PVFS performance gets really bad once a day, then gets better again}
\label{sec:cron-indexing}

Several sites have reported poor PVFS performance early in the day that
eventually goes away, until the next day when the cycle begins again. Daily
cron jobs might be the culprit in these cases. In particular, most Linux
distributions have a daily cron job (maybe called 'slocate', 'locate' or
'updatedb') that indexes the entire file system. Networked file systems
such as NFS are often excluded from this indexing.

The exact steps to remove PVFS from this indexing process vary among
distributions. Generally speaking, there should be a cron script in
\texttt{/etc/cron.daily} called 'slocate' or 'updatedb'. That script should
have a list of excluded file systems (like /var/run and /tmp) and file
types (like 'proc' and 'nfs'). Either add 'pvfs2' to the list of file types
or add the pvfs2 mount point to the list of excluded file systems. Be sure
to do this on all machines in your cluster.

\subsection{Make kmod24 fails with ``structure has no member...'' errors}

On some Redhat and Redhat-derived distributions, ``make kmod24'' might fail
with errors like this:
\begin{verbatim}
console]:make kmod24
CC [M] /usr/src/pvfs2/src/kernel/linux-2.4/pvfs2-utils.o
pvfs2-utils.c: In function `mask_blocked_signals':
pvfs2-utils.c:1063: structure has no member named `sig'
pvfs2-utils.c:1070: structure has no member named `sigmask_lock'
pvfs2-utils.c:1073: too many arguments to function `recalc_sigpending'
pvfs2-utils.c: In function `unmask_blocked_signals':
pvfs2-utils.c:1082: structure has no member named `sigmask_lock'
pvfs2-utils.c:1084: too many arguments to function `recalc_sigpending'
make[1]: *** [pvfs2-utils.o] Error 1
make: *** [kmod24] Error 2
\end{verbatim}
Redhat and derived distributions have a linux-2.4 based kernel with many
linux-2.6 features backported. These backported features change the
interface to the kernel fairly significantly. PVFS versions newer than
1.0.1 have a new configure option \texttt{--enable-redhat24}. With this
option enabled, the build accommodates the backported features (and the
associated interface changes).

\subsection{When I try to mount a pvfs2 file system, something goes wrong.}

\begin{itemize}
\item First, are all the userspace components running? If
  \texttt{pvfs2-ping} doesn't work, the VFS interface won't, either.
\item Make sure the pvfs2 kernel module is loaded.
\item Make sure pvfs2-client and pvfs2-client-core are running.
\item Take a look at dmesg. \texttt{pvfs2\_get\_sb -- wait timed out} could
  indicate a problem with \texttt{pvfs2-client-core}. See the next question.
\end{itemize}

\subsection{I did all of the above steps and I still can't mount pvfs2}
\label{sec:nptl_and_mounting}

There's one last thing to check. Are you using a Redhat or Fedora
distribution, but running with a stock kernel.org 2.4 kernel? If so, you
need to set the environment variable \texttt{LD\_ASSUME\_KERNEL} to 2.4.1,
or \texttt{pvfs2-client-core} will try to use the NPTL thread library.
NPTL requires a 2.6 kernel (or a heavily backported 2.4 kernel, which Redhat provides). Redhat systems expect to have such a kernel, so running a stock kernel.org 2.4 kernel can cause issues with any multi-threaded application. In this particular case, the \texttt{pvfs2-client-core} failure is hidden and can be tricky to diagnose. \subsection{I'm running Redhat and the pvfs2-server can't be killed! What's wrong?} On some Redhat systems, for compatibility reasons, the pvfs2-server program is actually a script that wraps the installed pvfs2-server binary. We do this ONLY if we detect that PVFS is being installed on a system with an NPTL implementation that we're incompatible with. Specifically, the script exports the LD\_ASSUME\_KERNEL=2.2.5 environment variable and value to avoid using the NPTL at run-time. The script quite literally exports this variable and then runs the installed pvfs2-server binary which is named \texttt{pvfs2-server.bin}. So to properly shutdown or kill the pvfs2-server application once it's running, you need to issue a \texttt{killall pvfs2-server.bin} command instead of the more common \texttt{killall pvfs2-server} command. \subsection{Why do you single out Redhat users? What's so different about Redhat than other distributions?} Some Redhat versions (and probably some other less popular distributions) use a heavily modified Linux 2.4.x kernel. Due to the changes made in the memory manager and signal handling, our default Linux 2.4.x kernel module will not even compile! We have compatibility code that can mend the differences in place, but we have to be able to detect that you're running such a system. Our configure script tries hard to determine which version you're running and matches it against a known list. If you suspect you need this fix and our script does not properly detect it, please send mail to the mailing list and include the contents of your /etc/redhat-release file. In addition, some Redhat versions ship with an NPTL (threading library) implementation that PVFS is not compatible with. We cannot explain why the errors we're seeing are occurring, as they appear to be in glibc and the threading library itself. In short, we disable the use of the NPTL on these few Redhat systems. It should be noted that we are fully compatible with other distributions that ship NPTL libraries (such as Gentoo and Debian/unstable). \subsection{Where is the kernel source on a Fedora system?} Older systems used to split up the kernel into several packages (\texttt{kernel}, \texttt{kernel-headers}, \texttt{kernel-source}). Fedora kernels are not split up that way. Everything you need to build a kernel module is in /lib/modules/`uname -r`/build. For example, Fedora Core 3 ships with linux-2.6.9-1.667. When configuring PVFS, you would pass \texttt{--with-kernel=/lib/modules/2.6.9-1.667/build} to the configure script. In Fedora Core 4 things changed a little bit. In order to build the pvfs2 kernel module, make sure you have both a \texttt{kernel} and \texttt{kernel-devel} package installed. If you have an SMP box, then you'll need to install the -smp versions of both -- i.e. \texttt{kernel-smp} and \texttt{kernel-smp-devel}. After both packages are installed, /lib/modules/`uname -r`/build will once again contain a correctly configured kernel source tree. \subsection{What are extended attributes? How do I use them with PVFS?} Extended attributes are name:value pairs associated with objects (files and directories in the case of PVFS). 
They are extensions to the normal attributes which are associated with all
objects in the system (i.e., the stat data). A complete overview of the
extended attribute concepts can be found in the \texttt{attr}(5) man page.

On supported 2.4 kernels and all 2.6 kernels, PVFS allows users to store
extended attributes on file-system objects through the VFS as well as
through the system interface. Example usage scenarios are shown below.

To set an extended attribute (``key1'', ``val1'') on a PVFS file foo:
\begin{verbatim}
prompt# setfattr -n key1 -v val1 /path/to/mounted/pvfs2/foo
\end{verbatim}
To retrieve an extended attribute for a given key (``key1'') on a PVFS file
foo:
\begin{verbatim}
prompt# getfattr -n key1 /path/to/mounted/pvfs2/foo
\end{verbatim}
To retrieve all attributes of a given PVFS file foo:
\begin{verbatim}
prompt# getfattr -m "" /path/to/mounted/pvfs2/foo
\end{verbatim}
Note that PVFS uses a few standard names for its internal use, which
prohibits users from reusing the same names. At the time of writing of this
document, the list of such keys is: ``dir\_ent'', ``root\_handle'',
``datafile\_handles'', ``metafile\_dist'', ``symlink\_target''. Further,
Linux also reserves keys that begin with the prefix ``system.'' to hold its
own extended attributes, thus making them unavailable for regular usage.

\subsection{What are Access Control Lists? How do I enable Access Control
Lists on PVFS?}

Recent versions of PVFS support POSIX Access Control Lists (ACLs), which
are used to define fine-grained discretionary access rights for files and
directories. Every object can be thought of as having an associated ACL
that governs the discretionary access to that object; this ACL is referred
to as an access ACL. In addition, a directory may have an associated ACL
that governs the initial access ACL for objects created within that
directory; this ACL is referred to as a default ACL. Each ACL consists of a
set of ACL entries. An ACL entry specifies the access permissions on the
associated object for an individual user or a group of users as a
combination of read, write and search/execute permissions.

PVFS supports POSIX ACLs by storing them as extended attributes. However,
support for access control based permission checking does not exist on 2.4
Linux kernels and is hence disabled on them. Most recent versions of the
Linux 2.6 kernel do allow for such permission checks, and PVFS enables ACLs
on such kernels. However, in order to use and enforce access control lists
on 2.6 kernels, one must mount the PVFS file system with the ``acl'' option
on the mount command line. For example:
\begin{verbatim}
prompt# mount -t pvfs2 tcp://testhost:3334/pvfs2-fs /mnt/pvfs2 -o acl
\end{verbatim}
Please refer to the man pages for ``setfacl'' and ``getfacl'', or to
\texttt{acl}(5), for detailed usage information.

\subsection{On SLES 9, 'make kmod' complains about \texttt{mmgrab} and
\texttt{flush\_icache\_range} being undefined}

SLES 9 (and possibly other kernels) makes use of internal symbols in some
inlined kernel routines. PVFS2-1.3.2 or newer has the configure option
\texttt{--disable-kernel-aio}. Passing this option to configure results in
a pvfs2 kernel module that uses only exported symbols.

\subsection{Everything built fine, but when I try to compile programs that
use PVFS, I get undefined references}
\label{sec:undefined_references}

The \texttt{libpvfs2} library requires a few additional libraries. Usually
\texttt{-lpthread -lcrypto -lssl} are required. Further, Myrinet and
InfiniBand have their own libraries.
If you do not link the required libraries, you will probably get errors
such as \texttt{undefined reference to `BIO\_f\_base64'}. The easiest and
most portable way to ensure that you link in all required libraries when
you link against \texttt{libpvfs2} is to use the \texttt{pvfs2-config}
utility. \texttt{pvfs2-config --libs} will give you the full set of linker
flags needed. Here's an example of how one might use this tool:
\begin{verbatim}
$ gcc -c $(pvfs2-config --cflags) example.c
$ gcc example.o -o example $(pvfs2-config --libs)
\end{verbatim}

\subsection{Can we run the Apache webserver to serve files off a PVFS volume?}

Sure you can! However, we recommend that you turn off the EnableSendfile
option in httpd.conf before starting the web server. Alternatively, you
could configure PVFS with the option \texttt{--enable-kernel-sendfile}.
Passing this option to configure results in a pvfs2 kernel module that
supports the sendfile callback. However, unless the files being served are
fairly large, this is unlikely to be a good idea in terms of performance.

Apache 2.x+ uses the {\tt sendfile} system call, which normally stages the
file data through the page cache. On recent 2.6 kernels, this can be
averted by providing a {\tt sendfile} callback routine at the file-system
level, which ensures that we don't end up with stale or inconsistent cached
data on such kernels. However, on older 2.4 kernels the {\tt sendfile}
system call streams the data through the page cache, and thus there is a
real possibility of serving stale data. Therefore users of the
{\tt sendfile} system call are warned to be wary of this detail.

\subsection{Trove-dbpf metadata format version mismatch!}
\label{sec:trove-migration}

In PVFS2-1.5.0 or newer the format of the metadata storage has changed from
previous versions (1.4.0 or earlier). This affects users who have created
file systems with the earlier versions of pvfs2 and wish to upgrade to the
most recent version. We've provided a migration tool that must be run (a
one-time only procedure) to convert the file system from the old format to
the new one. The migration tool can be used as follows:
\begin{verbatim}
$PVFS_INSTALL/bin/pvfs2-migrate-collection --all fs.conf server.conf-<hostname>
\end{verbatim}
This command finds all the pvfs2 storage collections specified in the
configuration files and migrates them to the new format. Instead of using
{\tt --all}, the option {\tt --fs} can be used to specify the name of the
storage collection that needs to be migrated (usually there's only one
storage collection, with the default name of 'pvfs2-fs').

\subsection{Problems with pre-release kernels}
\label{sec:rc-kernels}

For better or worse, the Linux kernel development process for the 2.6
series does not make much effort to maintain a stable kernel API. As a
result, we often find we need to make small adjustments to the PVFS kernel
module to track recent kernel additions or changes. If you are using a
pre-release kernel (anything with -rc in the name), you stand a good chance
of running into problems. We are unable to track every pre-release kernel,
but we do make an effort to publish necessary patches once a kernel is
officially released.

\subsection{Does PVFS work with Open-MX?}
\label{sec:open-mx}

Yes, PVFS does work with Open-MX. To use Open-MX, configure PVFS with the
same arguments that you would use for a normal MX installation:
``--disable-bmi-tcp'' and ``--with-mx=PATH''.
In addition, however, you must set the ``MX\_IMM\_ACK'' environment
variable to ``1'' before starting the pvfs2-server or pvfs2-client daemons.
This is necessary in order to account for differences in how MX and Open-MX
handle message progression by default.

%
% PERFORMANCE
%
\section{Performance}

This section covers issues related to the performance of PVFS.

\subsection{I configured PVFS with support for multiple interconnects
(e.g., InfiniBand and TCP), but see low performance}
\label{sec:multi-method-badperf}

When multiple interconnects are enabled, PVFS will poll both interfaces.
This gives PVFS maximum flexibility, but it does incur a performance
penalty when one interface is not being used. For highest performance,
configure PVFS with only one fast method. Consult the
\texttt{without-bmi-tcp} option or omit the \texttt{with-<METHOD>} option
when configuring PVFS. Note that it can sometimes be useful to have
multiple interconnects enabled. The right choice depends a lot on your
situation.

\subsection{I ran Bonnie and/or IOzone and the performance is terrible.
Why? Is there anything I can do?}
\label{sec:badperf}

We designed PVFS to work well for scientific applications in a cluster
environment. In such an environment, a file system must either spend time
ensuring all client-side caches are in sync, or not use a cache at all
(which is how PVFS currently operates). The \texttt{bonnie} and
\texttt{bonnie++} benchmarks read and write very small blocks -- on the
order of 1K. These many small requests must travel from the client to the
server and back again. Without client-side caching, there is no sane way to
speed this up. To improve benchmark performance, specify a bigger block
size.

PVFS has several more aggressive optimizations that can be turned on, but
those optimizations require that applications accessing PVFS can cope with
out-of-sync caches. In the future, PVFS is looking to provide optional
semantics for use through the VFS that will allow some client-side caching
to speed up these kinds of serial benchmarks. By offering a way to
explicitly sync data at any given time or by providing 'close-to-open'
semantics, these kinds of caching improvements become an option for some
applications.

Bear in mind that benchmarks such as IOzone and Bonnie were meant to stress
local file systems. They do not accurately reflect the types of workloads
for which we designed PVFS. Furthermore, because of their serial nature,
PVFS will be unable to deliver its full performance. Instead, try running a
parallel file system benchmark like IOR
(\url{ftp://ftp.llnl.gov/pub/siop/ior/}).

\subsection{Why is program XXX so slow?}
\label{sec:why_so_slow}

See Question~\ref{sec:badperf}. If the program uses small block sizes to
access a PVFS file, performance will suffer.

Setting both (or either of) the \texttt{TroveSyncMeta} and
\texttt{TroveSyncData} options to \texttt{no} in the config file can
improve performance in some situations. If you set the value to \texttt{no}
and the server is terminated unexpectedly, you will likely lose data (or
access to it).

Also, PVFS has a transparent server-side attribute cache (enabled by
default), which can speed up applications that read a lot of attributes
(\texttt{ls}, for example). Playing around with the \texttt{AttrCache*}
config file settings may yield some performance improvements. If you're
running a serial application on a single node, you can also use the
client-side attribute cache (disabled by default); its timeout is
adjustable as a command-line argument to pvfs2-client.
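To see the block-size effect for yourself, a quick experiment is often more
convincing than a benchmark suite. The Python sketch below is not a
calibrated benchmark and the target path is hypothetical; it simply writes
the same amount of data with different block sizes and reports the elapsed
time. On a PVFS mount, the small-block case will typically be dramatically
slower.
\begin{verbatim}
import os, time

def time_write(path, total_bytes, block_size):
    # Write total_bytes to path in chunks of block_size and time it.
    chunk = b'x' * block_size
    t0 = time.time()
    f = open(path, 'wb')
    written = 0
    while written < total_bytes:
        f.write(chunk)
        written += block_size
    f.flush()
    os.fsync(f.fileno())
    f.close()
    return time.time() - t0

if __name__ == '__main__':
    # Hypothetical file on a mounted PVFS volume; adjust the path.
    target = '/mnt/pvfs2/blocksize_test.tmp'
    total = 64*1024*1024                      # 64 MB in total
    for block in (1024, 65536, 4*1024*1024):  # 1 KB, 64 KB, 4 MB
        elapsed = time_write(target, total, block)
        print('block size %8d bytes: %6.2f s' % (block, elapsed))
        os.remove(target)
\end{verbatim}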
\subsection{NFS outperforms PVFS for application XXX. Why?} \label{sec:nfs_vs_pvfs2} In an environment where there is one client accessing a file on one server, NFS will outperform PVFS in many benchmarks. NFS has completely different consistency semantics, which work very well when just one process accesses a file. There is some ongoing work that will optionally offer similar consistency semantics for PVFS, at which point we will be playing on a level field, so to speak. However, if you insist on benchmarking PVFS and NFS in a single-client test, there are some immediate adjustments you can make. The easiest way to improve PVFS performance is to increase the block size of each access. Large block sizes help most file systems, but for PVFS they make a much larger difference in performance than they do for other file systems. Also, if the \texttt{TroveSyncMeta} and \texttt{TroveSyncData} options are set to \texttt{no} in your PVFS configuration file, the server will sync data to disk only when a flush or close operation is called. The \texttt{TroveSyncMeta} option is set to \texttt{yes} by default, to limit the amount of data that could be lost if a server is terminated unexpectedly. With this option enabled, it is somewhat analogous to mounting your NFS volume with the \texttt{sync} flag, forcing it to sync data after each operation. As a final note on the issue, if you plan on running application XXX, or a similar workload, and the NFS consistency semantics are adequate for what you're doing, then perhaps PVFS is not a wise choice of file system for you. PVFS is not designed for serial workloads, particularly one with small accesses. \subsection{Can the underlying local file system affect PVFS performance?} \label{sec:local_fs} Yes! However, the interaction between the PVFS servers and the local file system hosting the storage space has not been fully explored. No doubt a great deal of time could be spent on different file systems and their parameters. People have looked at sync performance for a variety of file systems. Some file systems will flush all dirty buffers when \texttt{fsync} is called. Other file systems will only flush dirty buffers belonging to the file. See the threads starting at \url{http://www.parl.clemson.edu/pipermail/pvfs2-developers/2004-July/000740.html} and at \url{http://www.parl.clemson.edu/pipermail/pvfs2-developers/2004-July/000741.html}. These tests demonstrate wide variance in file system behavior. Interested users are encouraged to experiment and discuss their findings on the PVFS lists. If you're looking for a quick suggestion for a local file system type to use, we suggest ext3 with ``journal data writeback'' option as a reasonable choice. \subsection{Is there any way to tune particular directories for different workloads?} \label{sec:dir_tuning} Yes. This can be done by using extended attributes to set directory hints. Three hints are currently supported, and they allow you to specify the distribution, distribution parameters, and number of datafiles to stripe across. They will not change the characteristics of existing files, but they will take effect for any newly created files within the directory. These hints will also be inherited by any new subdirectories. \subsubsection{Distribution} The distribution can be set as follows: \begin{verbatim} prompt# setfattr -n "user.pvfs2.dist_name" -v "basic_dist" /mnt/pvfs2/directory \end{verbatim} Supported distribution names can be found by looking in the pvfs2-dist-* header files. 
\subsubsection{Distribution parameters}

Some distributions allow you to set parameters that impact how the
distribution behaves. These parameters can be set as follows:
\begin{verbatim}
prompt# setfattr -n "user.pvfs2.dist_params" -v "strip_size:4096" /mnt/pvfs2/directory
\end{verbatim}
You can specify more than one ``parameter:value'' pair by separating them
with commas.

\subsubsection{Number of datafiles}

You can also specify the number of datafiles to stripe across:
\begin{verbatim}
prompt# setfattr -n "user.pvfs2.num_dfiles" -v "1" /mnt/pvfs2/directory
\end{verbatim}
PVFS defaults to striping files across each server in the file system.
However, you may find that for small files it is advantageous to limit each
file to only a subset of servers (or even just one).

\subsection{My app still runs more slowly than I would like. What can I do?}
\label{sec:tuning}

If you ask the mailing list for help with performance, someone will
probably ask you one or more of the following questions:
\begin{itemize}
\item Are you running servers and clients on the same nodes? We support
  this configuration -- sometimes it is required given space or budget
  constraints. You will not, however, see the best performance out of this
  configuration. See Section~\ref{sec:howmany-servers}.
\item Have you benchmarked your network? A tool like netpipe or ttcp can
  help diagnose point-to-point issues. PVFS will tax your bisection
  bandwidth, so if possible, run simultaneous instances of these network
  benchmarks on multiple machine pairs and see if performance suffers. One
  user realized the cluster had a hub (not a switch, a hub) connecting all
  the nodes. Needless to say, performance was pretty bad.
\item Have you examined buffer sizes? On Linux, the settings in /proc can
  make a big difference in TCP performance. Set
  \texttt{/proc/sys/net/core/rmem\_default} and
  \texttt{/proc/sys/net/core/wmem\_default} to larger values.
\end{itemize}

Tuning applications can be quite a challenge. You have disks, networks,
operating systems, PVFS, the application, and sometimes MPI. We are working
on a document to better guide the tuning of systems for I/O-intensive
workloads.

%
% REDUNDANCY
%
\section{Fault Tolerance}
\label{sec:fault-tolerance}

This section covers issues related to fault tolerance in the context of
PVFS.

\subsection{Does PVFS support some form of fault tolerance?}

Systems can be set up to handle many types of failures for PVFS. Given
enough hardware, PVFS can even handle server failure.

\subsection{Can PVFS tolerate client failures?}

Yes. One of the benefits of the PVFS design is that client failures are not
a significant event in the system. Because there is no locking system in
PVFS, and no shared state stored on clients in general, a client failure
does not affect either the servers or other clients.

\subsection{Can PVFS tolerate disk failures?}

Yes, if configured to do so. Multiple disks on each server may be used to
form redundant storage for that server, allowing servers to continue
operating in the event of a disk failure. See
Section~\ref{sec:multiple-disks} for more information on this approach.

\subsection{Can PVFS tolerate network failures?}

Yes, if your network has redundant links. Because PVFS uses standard
networks, the same approaches for providing multiple network connections to
a server may be used with PVFS.

\subsection{Can PVFS tolerate server failures?}

Yes. We currently have a recipe describing the hardware and software needed
to set up PVFS in a high-availability cluster.
Our method is outlined in the `pvfs2-ha.\{ps,pdf\}' file in the doc subdirectory of the PVFS distribution. This configuration relies on shared storage and commodity ``heartbeat'' software to provide means for failover. Software redundancy offers a less expensive solution to redundancy, but usually at a non-trivial cost to performance. We are studying how to implement software redundancy with lower overhead, but at this time we provide no software-only server failover solution. % % INTERFACES % \section{File System Interfaces} This section covers issues related to accessing PVFS file systems. \subsection{How do I get MPI-IO for PVFS?} The ROMIO MPI-IO implementation, as provided with MPICH2 and others, supports PVFS. You can find more information in the ROMIO section of the pvfs2-quickstart: \url{http://www.pvfs.org/pvfs2/pvfs2-quickstart.html\#sec:romio} \subsection{Can I directly manipulate PVFS files on the PVFS servers without going through some client interface?} You can, yes, but you probably should not. The PVFS developers are not likely to help you out if you do this and something gets messed up... % % MANAGEMENT % \section{Management} This section covers questions about managing PVFS file systems. \subsection{How can I back up my PVFS file system?} The default storage implementation for PVFS (called Trove DBPF for ``DB Plus Files'') stores all file system data held by a single server in a single subdirectory. In that subdirectory is a directory tree containing UNIX files with file data and metadata. % This entire directory tree can be backed up in any manner you like and restored if problems occur. As a side note, this was not possible in PVFS v1, and is one of the many improvements present in the new system. \subsection{Can I add, remove, or change the order of the PVFS servers on an existing PVFS file system?} You can add and change the order of PVFS servers for an existing PVFS file system. At this time, you must stop all the servers in order to do so. To add a new server: \begin{enumerate} \item Unmount all clients \item Stop all servers \item Edit your config file to: \begin{enumerate} \item Add a new Alias for the new server \item Add a new DataHandleRange for the new server (picking a range you didn't previously use) \end{enumerate} \item Deploy the new config file to all the servers, including the new one \item Create the storage space on the new server \item Start all servers \item Remount clients \end{enumerate} To reorder the servers (causing round-robin to occur in a different relative order): \begin{enumerate} \item Unmount all clients \item Stop all servers \item Edit your config file to reorder the DataHandleRange entries \item Deploy the new config file to all the servers \item Start all servers \item Remount clients \end{enumerate} Note that adding a new server will \emph{not} cause existing datafiles to be placed on the new server, although new ones will be (by default). Migration tools are necessary to move existing datafiles (see Question~\ref{sec:migration}) both in the case of a new server, or if you wanted to migrate data off a server before removing it. \subsection{Are there tools for migrating data between servers?} \label{sec:migration} Not at this time, no. \subsection{Why does df show less free space than I think it should? 
What can I do about that?}
\label{sec:df-free-space}

PVFS uses a particular algorithm for calculating the free space on a file
system: it takes the minimum amount of space free on a single server and
multiplies this value by the number of servers storing file data.
%
This algorithm was chosen because it provides a lower bound on the amount
of data that could be stored on the system at that point in time.

If this value seems low, it is likely that one of your servers has less
space than the others (either physical space, or because someone has put
some other data on the same local file system on which PVFS data is
stored). The \texttt{pvfs2-statfs} utility, included with PVFS, can be used
to check the amount of free space on each server, as can the \texttt{karma}
GUI.

\subsection{Does PVFS have a maximum file system size? If so, what is it?}

PVFS uses a 64-bit value for describing the offsets into files, so
theoretically file sizes are virtually unlimited. However, in practice
other system constraints place upper bounds on the size of files and file
systems. To best calculate maximum file and file system sizes, you should
determine the maximum file and file system sizes for the local file system
type that you are using for PVFS server storage and multiply these values
by the number of servers you are using.

\subsection{Mounting PVFS with the interrupt option}
\label{sec:mountintr}

The PVFS kernel module supports the {\tt intr} option provided by network
file systems. This allows applications to be sent kill signals when a file
system is unresponsive (due to network failures, etc.). The option can be
specified at mount time:
\begin{verbatim}
mount -t pvfs2 -o intr tcp://hosta:3334/pvfs2-fs /pvfs-storage/
\end{verbatim}

%
% MISSING FEATURES
%
\section{Missing Features}

This section discusses features that are not present in PVFS but are
present in some other file systems.

\subsection{Why don't hardlinks work under PVFS?}

We didn't implement hardlinks, and there is no plan to do so. Symlinks are
implemented.

\subsection{Can I \texttt{mmap} a PVFS file?}

Private, read-only mmapping of files is supported. Shared mmapping of files
is not. Supporting this would force a great deal of additional
infrastructure into PVFS that would compromise the design goals of
simplicity and robustness. This ``feature'' was intentionally left out, and
it will remain so.

\subsection{Will PVFS store new files on servers with more space, allowing
files to be stored when one server runs out of space?}

No. Currently PVFS does not intelligently place new files based on free
space. It's a good idea, and possible, but we have not done this yet. See
Section~\ref{sec:contributing} for notes on how you could help get this
feature in place.

\subsection{Does PVFS have locks?}

No. Locking subsystems add a great deal of shared state to a parallel file
system implementation, and one of the primary design goals was to eliminate
shared state in PVFS. This results in a simpler, more fault-tolerant
overall system than would have been possible had we integrated locking into
the file system. It's possible that an add-on locking subsystem will be
developed at some point; however, there is no plan to build such a system
at this time.

%
% HELPING OUT
%
\section{Helping Out}

This section covers ways one could contribute to the PVFS project.

\subsection{How can I contribute to the PVFS project?}
\label{sec:contributing}

There are lots of ways to directly or indirectly contribute to the PVFS
project.
Reporting bugs helps us make the system better, and describing your use of
the PVFS system helps us better understand where and how PVFS is being
deployed. Even better, patches that fix bugs, add features, or support new
hardware are very welcome!

The PVFS community has historically been a friendly one, and we encourage
users to discuss issues and exchange ideas on the mailing lists. If you're
interested in this type of exchange, we suggest joining the PVFS2
Developers List, grabbing the newest CVS version of the code, and seeing
what is new in PVFS. See \url{http://www.pvfs.org/pvfs2/developers.html}
for more details.

%
% IMPLEMENTATION DETAILS
%
\section{Implementation Details}

This section answers questions regarding specific components of the
implementation. It is most useful for people interested in augmenting or
modifying PVFS.

\subsection{BMI}

This section specifically covers questions about the BMI interface and
implementations.

\subsubsection{What is the maximum packet size for BMI?}

Each BMI module is allowed to define its own maximum message size. See
\texttt{BMI\_tcp\_get\_info}, \texttt{BMI\_gm\_get\_info}, and
\texttt{BMI\_ib\_get\_info} for examples of the maximum sizes that each of
the existing modules supports. The maximum should be reported when you
issue a \texttt{get\_info} call with the option set to
\texttt{BMI\_CHECK\_MAXSIZE}. Higher-level components of PVFS perform these
checks in order to make sure that they don't choose buffer sizes that are
too large for the underlying network.

\subsubsection{What happens if I try to match a BMI send with a BMI receive
that has too small a buffer?}

If the receive buffer is too small for the incoming message, then the
communication will fail and an error will be reported if possible. We don't
support any semantics for receiving partial messages or anything like that.
It's OK if the receive buffer is too big, though.

\end{document}
{ "alphanum_fraction": 0.7849597197, "avg_line_length": 45.0595975232, "ext": "tex", "hexsha": "ee4a3abcf9ef42c10ab6d4e06b42f07c73e4185b", "lang": "TeX", "max_forks_count": 7, "max_forks_repo_forks_event_max_datetime": "2021-09-26T07:19:44.000Z", "max_forks_repo_forks_event_min_datetime": "2018-10-23T13:40:21.000Z", "max_forks_repo_head_hexsha": "386a12df2b0310ec2e0d37aba092204d9490c7be", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "dschwoerer/orangefs", "max_forks_repo_path": "doc/pvfs2-faq.tex", "max_issues_count": 84, "max_issues_repo_head_hexsha": "386a12df2b0310ec2e0d37aba092204d9490c7be", "max_issues_repo_issues_event_max_datetime": "2021-09-05T19:37:51.000Z", "max_issues_repo_issues_event_min_datetime": "2018-05-31T20:14:51.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "dschwoerer/orangefs", "max_issues_repo_path": "doc/pvfs2-faq.tex", "max_line_length": 122, "max_stars_count": 44, "max_stars_repo_head_hexsha": "386a12df2b0310ec2e0d37aba092204d9490c7be", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "dschwoerer/orangefs", "max_stars_repo_path": "doc/pvfs2-faq.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-16T11:23:49.000Z", "max_stars_repo_stars_event_min_datetime": "2018-10-11T23:16:42.000Z", "num_tokens": 14181, "size": 58217 }
\section{Konverzija iz HDF5 u CSV format} \label{sec:DodatakIzvlacenje} \begin{figure}[H] \lstset{style=mystyle} \begin{lstlisting}[language=Python, basicstyle=\footnotesize] """ Alexis Greenstreet (October 4, 2015) University of Wisconsin-Madison """ class Song: songCount = 0 def __init__(self, songID): self.id = songID Song.songCount += 1 self.albumID = None # string self.albumName = None # string self.artistFamiliarity = None # float self.artistHottnesss = None # float self.artistID = None # string self.artistLatitude = None # float self.artistLocation = None # string self.artistLongitude = None # float self.artistName = None # string self.audioMd5 = None # string self.danceability = None # float self.duration = None # float self.endOfFadeIn = None # float self.energy = None # float self.genre = None # string self.genreList = [] # list of strings self.key = None # int self.keyConfidence = None # float self.keySignature = None # float self.keySignatureConfidence = None # float self.loudness = None # float self.mode = None # int self.modeConfidence = None # float self.release = None # string self.songHotttness = None # float self.songId = None # string self.startOfFadeOut = None # float self.tempo = None # float self.timeSignature = None # int self.timeSignatureConfidence = None # float self.title = None # string self.trackId = None # string self.year = None # int \end{lstlisting} \label{code:SongClass} \caption{Klasa kori\v{s}\'c{}ena za deserijalizaciju podataka.} \end{figure} \begin{figure}[H] \lstset{style=mystyle} \begin{lstlisting}[language=Python, basicstyle=\footnotesize] """ Alexis Greenstreet (October 4, 2015) University of Wisconsin-Madison """ outputFile1 = open('SongCSV.csv', 'w') csvRowString = "" csvRowString = ("SongID,AlbumID, ...") csvAttributeList = re.split('\W+', csvRowString) for i, v in enumerate(csvAttributeList): csvAttributeList[i] = csvAttributeList[i].lower() outputFile1.write("SongNumber,"); outputFile1.write(csvRowString + "\n"); csvRowString = "" ################################################# #Set the basedir here, the root directory from which the search basedir = "/home/m/Documents/MillionSongSubset/data" ext = ".h5" ################################################# csvRowStringTotal = "" for root, dirs, files in os.walk(basedir): files = glob.glob(os.path.join(root,'*'+ext)) for f in files: print f songH5File = hdf5_getters.open_h5_file_read(f) song = Song(str(hdf5_getters.get_song_id(songH5File))) song.artistID = str(hdf5_getters.get_artist_id(songH5File)) # Isto za ostala polja artistMbtags = np.array(hdf5_getters.get_artist_mbtags(songH5File)) song.genre = ' | '.join(artistMbtags) csvRowString += str(song.songCount) + "," csvRowString += song.id + "," # Isto za ostala polja csvRowString += song.trackId + "," csvRowString += song.genre + "\n" csvRowStringTotal += csvRowString csvRowString = "" songH5File.close() outputFile1.write(csvRowStringTotal) outputFile1.close() \end{lstlisting} \label{code:ConvertToCSV} \caption{Upro\v{s}\'c{}ena verzija programa kori\v{s}\'c{}enog za konvertovanje iz HDF5 u CSV format.} \end{figure}
{ "alphanum_fraction": 0.550036523, "avg_line_length": 37.3363636364, "ext": "tex", "hexsha": "035c08cd3740148f6d7166fbad03ce5e3e2580bd", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "803acfeaab5a5f8af5ca982fedd0dafd7e7d6734", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ivan-ristovic/papers", "max_forks_repo_path": "Data_Mining_Million_Songs_Dataset_RS/delovi/20_dodatak_izvlacenje.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "803acfeaab5a5f8af5ca982fedd0dafd7e7d6734", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ivan-ristovic/papers", "max_issues_repo_path": "Data_Mining_Million_Songs_Dataset_RS/delovi/20_dodatak_izvlacenje.tex", "max_line_length": 102, "max_stars_count": null, "max_stars_repo_head_hexsha": "803acfeaab5a5f8af5ca982fedd0dafd7e7d6734", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ivan-ristovic/papers", "max_stars_repo_path": "Data_Mining_Million_Songs_Dataset_RS/delovi/20_dodatak_izvlacenje.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 983, "size": 4107 }
\chapter{Introduction} \lipsum[2-4] \section{Motivation and Objectives} \lipsum[1-2] \section{Contributions from this Research} \lipsum[1] \subsection{Part I} \lipsum[66] \paragraph{Contribution Title} \Blindtext[1][1] \paragraph{Contribution Title} \Blindtext[1][1] \paragraph{Contribution Title} \Blindtext[1][1] \subsection{Part II} \lipsum[66] \paragraph{Contribution Title} \lipsum[1] \paragraph{Contribution Title} \lipsum[1] \subsection{Part III} \lipsum[75] \paragraph{Contribution Title} \lipsum[1] \section{Organization of the Thesis} \begin{itemize} \item \textbf{Chapter 1} introduces \item \textbf{Chapter 2} gives \item \textbf{Chapter 3} explores \item \textbf{Chapter 4} scrutinizes \item \textbf{Chapter 5} introduces \item \textbf{Chapter 6} presents \item \textbf{Chapter 7} develops \item \textbf{Chapter 8} shows \item \textbf{Chapter 9} concludes \end{itemize}
{ "alphanum_fraction": 0.7011375388, "avg_line_length": 16.6724137931, "ext": "tex", "hexsha": "66e1a838149b96ef45e9d4b330f2c99676c54ab9", "lang": "TeX", "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2021-05-08T16:08:30.000Z", "max_forks_repo_forks_event_min_datetime": "2018-01-04T08:43:16.000Z", "max_forks_repo_head_hexsha": "5a280aacef92ab7c939e7980145c9bc202b77d9c", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "c-i-p-h-e-r/PhinisheD", "max_forks_repo_path": "thesisInChapters/3_introduction/intro.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "5a280aacef92ab7c939e7980145c9bc202b77d9c", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "c-i-p-h-e-r/PhinisheD", "max_issues_repo_path": "thesisInChapters/3_introduction/intro.tex", "max_line_length": 43, "max_stars_count": 6, "max_stars_repo_head_hexsha": "5a280aacef92ab7c939e7980145c9bc202b77d9c", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "c-i-p-h-e-r/PhinisheD", "max_stars_repo_path": "thesisInParts/3_introduction/intro.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-09T06:09:01.000Z", "max_stars_repo_stars_event_min_datetime": "2018-01-04T07:21:18.000Z", "num_tokens": 297, "size": 967 }
%\documentclass[a4paper,10pt]{article} \documentclass{article} \usepackage{epsfig,relsize,moreverb,fancyvrb,A4} %\addtolength{\textwidth}{1cm} \input{macros1} \begin{document} \author{Hans Petter Langtangen\thanks{Dept.~of Scientific Computing, Simula Research Laboratory, and Dept.~of Informatics, University of Oslo. \texttt{[email protected]}.}} \title{Useful Script for Compilation} \date{\today} \maketitle % fill in from Compiler tools chapter \section{Using the C Preprocessor in Fortran Codes} C and C++ compilers run a preprocessor\footnote{This section probably does not make much sense if you are not familiar with the C or C++ preprocessor.} prior to the compilation. The preprocessor is a handy tool that is, unfortunately, not integrated with Fortran compilers. Nevertheless, the preprocessor can be executed as a stand-alone program, called \code{cpp}, or it can be run as a part of a C compiler (e.g., \code{gcc -E}). In this way, one can apply the preprocessor to any file, including Fortran source code files. We shall now develop a script that transforms a Fortran file with C preprocessor directives to standard Fortran syntax. This will allow writing Fortran programs with, e.g., include statements (\code{\#include}), macros (\code{\#define}), and C-style (possible multi-line) comments (\code{/* ... */}). We let Fortran files containing C preprocessor directives have the extension \code{.fcp}. The script is to be invoked with the following command-line parameters: \begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm] [cpp options] file1.fcp \end{Verbatim} \noindent That is, standard \code{cpp} options can be present, followed by the name of a single Fortran file. Typical examples on \code{cpp} options are definitions of macros, like \code{-DMY\_DEBUG=2}, and specification of directories with include files, like \code{-I../mydir}. The C preprocessor is run by the command \begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm] cpp [cpp options] file1.fcp > file1.f \end{Verbatim} \noindent if you have \code{cpp} available as a separate program. Since \code{cpp} is not always present as a separate program, we recommend to run the preprocessor as part of GNU's C compiler \code{gcc}, since \code{gcc} is a standard utility found on most machines. The relevant commands are then \begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm] cp file1.fcp tmp.c # gcc must work with a file with suffix .c gcc -E -c [cpp options] tmp.c > file1.f rm -f tmp.c \end{Verbatim} \noindent In Python this becomes \begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm] cpp_options = ' '.join(sys.argv[1:-1]) shutil.copy(fcp_file, 'tmp.c') cmd = 'gcc -E %s -c tmp.c > %s.f' % (cpp_options,fcp_file[:-4]) os.system(cmd) os.remove('tmp.c') \end{Verbatim} \noindent A fundamental problem with the macro expansions performed by the preprocessor is that code lines can easily exceed 72 characters, which is illegal according to the Fortran 77 standard. Although modern Fortran 77 compilers, and in particular Fortran 90/95 compilers, allow longer line lengths, buffer overflow is not unusual for long lines (longer than (say) 255 characters). 
Since macros are expanded to a single line, there is a danger of very long lines, and the script needs to split lines that are longer than a specified number of characters, which we here set to 72. Fortunately, whitespace is not significant in Fortran so one can split a line by just inserting newline, five blanks, and any character (indicating continuation of a line) in column six. \begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm] # split lines that are longer than maxlen chars: maxlen = 72 f = open(fcp_file[:-4]+'.f', 'r'); lines = f.readlines(); f.close() for i in range(len(lines)): line = lines[i] if len(line) > maxlen: # split line? nrest = len(line) - maxlen splitline = line[0:maxlen] start = maxlen while nrest > 0: splitline = splitline + '\n &' + \ line[start:start+maxlen-6] start = start + maxlen-6 nrest = nrest - (maxlen-6) lines[i] = splitline # in-place list modification \end{Verbatim} \noindent Note that this split feature makes the script convenient for writing Fortran source code files without any restriction on the line length, besides allowing the use of any C preprocessor directive. Newer C preprocessors preserve indentation, but minimize whitespace elsewhere such that labels like \code{10 CONTINUE} appear as \code{10 CONTINUE}, which is not valid Fortran 77 since \code{CONTINUE} starts before column 7. We therefore need to ensure that labels in columns 1-6 appear correctly: \begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm] c = re.compile(r'^(\s*)(\d+)(\s*)') # label regex for i in range(len(lines)): # remove lines starting with # lines[i] = re.sub(r'^#.*', ' ', lines[i]) if len(lines[i]) >= 5: column1to5 = lines[i][0:5] if re.search(r'\w',column1to5): # letter after label? m = re.search(r'(\s*)(\d+)(\s+)\w+', column1to5) if m: # insert extra white space after group 3 n = len(m.group(1))+len(m.group(2))+len(m.group(3)) space = ''.join([' ']*(6 - n)) lines[i] = m.group(1) + m.group(2) + m.group(3) + \ space + lines[i][n:] \end{Verbatim} \noindent Writing \code{lines} back to the Fortran file finishes the script. The complete script is called \code{fccp.py} and is available in \code{src/tools}. Here is a simple test example that \code{fccp.py} can handle. Two macros are defined in a file \code{macros.i}, stored in (say) \code{/home/hpl/f77macros}, \begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm] #define DDx(u, i, j, dx) \ (u(i+1,j) - 2*u(i,j) + u(i-1,j))/(dx*dx) #define DDy(u, i, j, dy) \ (u(i,j+1) - 2*u(i,j) + u(i,j-1))/(dy*dy) \end{Verbatim} \noindent A Fortran 77 file \code{wave1.fcp} with C macros, \code{\#ifdef} directives, and C-style comments has the following form: \begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm] #include <macros.i> C234567 column numbers 1-7 are important in F77! 
      SUBROUTINE WAVE1(SOL, SOL_PREV, SOL_PREV2, NX, NY,
     &                 DX, DY, DT)
C     variable declarations:
      INTEGER NX, NY        /* no of points in x and y dir */
      REAL*8  DX, DY, DT    /* cell and time increments */
      REAL*8  SOL(NX,NY), SOL_PREV(NX,NY), SOL_PREV2(NX,NY)
C     update SOL:
      DO 20 J=1, NY
         DO 10 I=1, NX
/* a 2nd-order time difference combined with 2nd-order
   differences in space results in the standard explicit
   finite difference scheme for the wave equation: */
            SOL(I,J) = 2*SOL_PREV(I,J) - SOL_PREV2(I,J) +
     &                 DT*DT*(DDx(SOL_PREV, I, J, DX) +
     &                        DDy(SOL_PREV, I, J, DY))
#ifdef DEBUG
            WRITE(*,*) 'SOL(',I,',',J,')=',SOL(I,J)
#endif
 10      CONTINUE
 20   CONTINUE
      RETURN
      END
\end{Verbatim}
\noindent
We may then run
\begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm]
fccp.py -I/home/hpl/f77macros wave1.fcp
\end{Verbatim}
\noindent
and get a valid Fortran 77 file \code{wave1.f}, which looks like this:
\begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt]
C234567 column numbers 1-7 are important in F77!
      SUBROUTINE WAVE1(SOL, SOL_PREV, SOL_PREV2, NX, NY,
     &                 DX, DY, DT)
C     variable declarations:
      INTEGER NX, NY
      REAL*8  DX, DY, DT
      REAL*8  SOL(NX,NY), SOL_PREV(NX,NY), SOL_PREV2(NX,NY)
C     update SOL:
      DO 20 J=1, NY
         DO 10 I=1, NX
            SOL(I,J) = 2*SOL_PREV(I,J) - SOL_PREV2(I,J) +
     & DT*DT*(( SOL_PREV ( I +1, J ) - 2* SOL_PREV ( I
     &, J ) + SOL_PREV ( I -1, J ))/( DX * DX ) +
     & ( SOL_PREV ( I , J +1) - 2* SOL_PREV ( I
     &, J ) + SOL_PREV ( I , J -1))/(dy*dy) )
 10      CONTINUE
 20   CONTINUE
      RETURN
      END
\end{Verbatim}
\noindent
If you are a Fortran 77 programmer and start using \code{fccp.py}, never forget that
changes in the source code must be performed in files with suffix \code{.fcp}!

%The files \code{macros.i} and \code{wave1.fcp} are found in
%\code{src/misc}.

\section{Experimenting with Optimization Flags}
\label{adv:compile}

Experimenting with a compiler's optimization flags is a frequently encountered task in
high-performance computing. Measuring the efficiency of a wide range of flags, possibly
on different platforms and with different compilers, requires accurate work. This should
not be left as a manual job for a human being. Automating the work in a script makes it
easy to repeat the experiments, extend or modify them, try out new compilers and
hardware, etc.

In this section we shall develop a quite general script for running a benchmark problem
with different compilers and compiler flags. A completely general tool for compiler
experimentation would in some sense require us to reimplement a \code{make} program,
which is far beyond our scope. However, with hardly any extra work we can generalize a
specific example and provide a tool that, with minor modifications, can be adapted to a
wide range of problems. This is typical for scripting: even a short script can be made
quite generic, and although the completely generic counterpart is beyond our scope, the
script can meet surprisingly many demands if you allow for some tuning of the statements
in a new application.

Imagine we have some files to be compiled and linked by a set of compilers. The compilers
have some common flags and some flags that are specific to a certain compiler. We want to
experiment with different settings of the compiler-specific flags, i.e., for each
compiler we want to run through a set of different flag alternatives. The resulting
executable is to be run in a specified benchmark problem.
We need to measure the CPU time, and if possible, grab results from a profiler such as
\code{gprof} or \code{prof}. The results should of course be nicely formatted for easy
inspection. It should be easy to repeat tests on different platforms. The purpose is now
to accomplish these tasks in a Python script.

We restrict attention to source code files written in the Fortran 77 language. Modifying
the resulting script to treat C or C++ files is a trivial task. Although most
applications are compiled and linked using a makefile, we will in the script issue the
commands directly without using any make utility\footnote{Apart from checking a file's
date and time, and thereby avoiding unnecessary recompilation, \code{make} does little
more than issue straightforward operating system commands. These are simpler to deal
with in a script written in an easy-to-read language like Python. Avoiding recompilation
is not a major issue anymore on today's fast machines.}.

We introduce a set of common options for the compilation and for the linking step as
well as a set of libraries to link with the application. A minimal specification of
these options is
\begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm]
compile_flags = '-c'
link_flags    = '-o %s' % programname
libs          = ''
\end{Verbatim}
\noindent
More advanced applications might need specifications of, e.g., include and library
directories, as in this example:
\begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm]
compile_flags = '-c -I %s/include' % os.environ['PREFIX']
link_flags    = '-o %s -L %s/lib -L /usr/share/some/lib' % \
                (programname, os.environ['PREFIX'])
libs          = '-ladvanced_lib -lmylib'
\end{Verbatim}
\noindent
The information about a specific compiler is stored in a dictionary with keys reflecting
the name of the compiler, a description of the compiler, the common compile and link
options, and a list of variable compile options. The latter data are subject to
experimentation. Here is a definition of such a dictionary for GNU's Fortran 77 compiler
\code{g77}:
\begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm]
g77 = {
    'name'          : 'g77',
    'description'   : 'GNU f77 compiler, v2.95.4',
    'compile_flags' : compile_flags + ' -pg',
    'link_flags'    : link_flags + ' -pg',
    'libs'          : libs,
    'test_flags'    : ['-O0', '-O1', '-O2', '-O3',
                       '-O3 -ffast-math -funroll-loops',],
    'platform_specific_compile_flags' : {},
    'platform_specific_link_flags'    : {},
    'platform_specific_libs'          : { c1 : '-lf2c' },
    }
\end{Verbatim}
\noindent
According to the \code{test\_flags} key, we want to experiment with different levels of
optimization (\code{-O0}, ..., \code{-O3}) and special optimization flags (e.g.,
\code{-ffast-math}). We will typically loop over the \code{test\_flags} values and
compile and run the benchmark problem for each value.
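Before adding more compilers, it may help to see roughly what one pass of such an
experiment could look like in plain Python. The following is only a sketch of the idea,
not the actual \code{compile.py} script discussed later in this section; it assumes that
\code{f77files}, \code{programname}, and \code{inputfile} have already been set (their
extraction from the command line is shown below) and it ignores the platform-specific
dictionary entries.
\begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm]
compiler = g77                                  # dictionary defined above
optimization_flags = compiler['test_flags'][2]  # e.g. '-O2'

for f77file in f77files:                        # compile each source file
    os.system('%s %s %s %s' % (compiler['name'],
              compiler['compile_flags'], optimization_flags, f77file))
ofiles = ' '.join([f[:-2] + '.o' for f in f77files])
os.system('%s %s %s %s' % (compiler['name'],    # link the executable
          compiler['link_flags'], ofiles, compiler['libs']))

t0 = os.times()                  # snapshot before running the benchmark
os.system('%s < %s' % (programname, inputfile))
t1 = os.times()
cpu = (t1[2] + t1[3]) - (t0[2] + t0[3])  # child user+system CPU time
print '%s %s: CPU time %.2f s' % \
      (compiler['name'], optimization_flags, cpu)
\end{Verbatim}
\noindent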
On a Sun system, we may want to test Sun's native F77 compiler: \begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm] # Sun f77 compiler: Sunf77 = { 'name' : 'f77', 'description' : 'Sun f77 compiler, v5.2', 'compile_flags' : compile_flags, 'link_flags' : link_flags, 'libs' : '', 'test_flags' : ['-O0', '-O1', '-fast',], 'platform_specific_compile_flags' : {}, 'platform_specific_link_flags' : {}, 'platform_specific_libs' : {}, } \end{Verbatim} \noindent The next step is to attach a list of compilers, where each compiler is represented by a dictionary as exemplified above, to a dictionary holding the various platforms where we want to perform the tests. To this end, we declare a dictionary structure \code{cd} (compiler data), whose keys are the name of specific machines. For example, \begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm] cd = {} c1 = 'basunus.ifi.uio.no' # computer 1 cd[c1] = {} cd[c1]['data'] = 'Linux 2.2.15 i686, 500 MHz, 128 Mb' c2 = 'skidbladnir.ifi.uio.no' # computer 2 cd[c2] = {} cd[c2]['data'] = 'SunOS 5.7, sparc Ultra-5_10' \end{Verbatim} \noindent \begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm] cd[c1]['compilers'] = [g77] cd[c2]['compilers'] = [g77, Sunf77] \end{Verbatim} \noindent The machine names are taken to be identical to the contents of the \code{HOST} environment variable. In this way, we can easily extract the name of the current computer inside the script. %Note that we use the name of a particular machine rather than the name %of, e.g., the operating system. A typical experiment with the compilers and flags on a computer can be sketched as follows. \begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm] # run through the various compiler and options for the # present host: host = os.environ['HOST'] for compiler in cd[host]['compilers']: for optimization_flags in compiler['test_flags']: # construct compilation command from # compiler['name'], # compiler['compile_flags'], # optimization_flags, # compiler['platform_specific_compile_flags'][host], # source code filenames <compile...> <link (similar construction as the compile command)...> <run problem...> <report timing results...> \end{Verbatim} \noindent The CPU-time measurement can be performed by calling \code{os.times} before and after running the benchmark program. More detailed information about the efficiency of the code can be obtained from a profiler, such as \code{gprof} or \code{prof}. Here we demonstrate how to run \code{gprof} or \code{prof} and grab the sorted table of the CPU time spent in each of the program's functions. 
If the table is long, we display only the first 10 functions:
\begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm]
def run_profiler(programname):
    """grab data from gprof/prof output and format nicely"""
    # gprof needs gmon.out (from the last execution of programname)
    if os.path.isfile('gmon.out'):
        # run gprof:
        if not findprograms(['gprof']):
            print 'Cannot find gprof'
            return
        res = os.popen('gprof ' + programname)
        lines = res.readlines()
        failure = res.close()
        if failure:
            print 'Could not run gprof'; return
        # grab the table from the gprof output:
        for i in range(len(lines)):
            if re.search(r'\%\s+cumulative\s+self', lines[i]):
                startline = i
                break
        try:
            # we are interested in the 10 first lines of the table,
            # but if there is a blank line, we stop there
            stopline = 10
            i = 0
            for line in lines[startline:startline+stopline]:
                if re.search(r'^\s*$', line):
                    stopline = i; break
                i = i + 1
            table = ''.join(lines[startline:startline+stopline])
            print table
            os.remove('gmon.out')  # require new file for next run...
        except:
            print 'Could not recognize a table in gmon.out...'; return
    elif os.path.isfile('mon.out'):
        # run prof:
        if not findprograms(['prof']):
            print 'Cannot find prof'
            return
        res = os.popen('prof ' + programname)
        lines = res.readlines()
        failure = res.close()
        if failure:
            print 'Could not run prof'; return
        for line in lines[0:10]:
            print line,
    else:
        # no gmon.out or mon.out, cannot run gprof or prof
        print programname,\
              'was not compiled in profiling mode (-pg or -p?)'
        return
\end{Verbatim}
\noindent
The \code{findprograms} function is found in the module \code{funcs} in the \code{py4cs}
package.

A possible command-line interface to such a script can have the following items:
\begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm]
programname file1.f file2.f ... inputfile comment
\end{Verbatim}
\noindent
This implies compiling and linking \code{file1.f}, \code{file2.f}, and so forth, then
running \code{programname < inputfile}, and finally reporting the CPU time in an output
line containing the \code{comment} about what type of test we perform. Extracting the
command-line information is trivial using Python's convenient subscripting syntax:
\begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm]
programname = sys.argv[1]
inputfile   = sys.argv[-2]
comment     = sys.argv[-1]
f77files    = sys.argv[2:-2]   # the files between programname and inputfile
\end{Verbatim}
\noindent
A specific application of the type of script described above is found in
\begin{Verbatim}[fontsize=\footnotesize,tabsize=8,baselinestretch=0.85,defineactive=\def\#{\itshape \CommentChar},fontfamily=tt,xleftmargin=7mm]
src/app/wavesim2D/F77/compile.py
\end{Verbatim}
\noindent
The Fortran 77 code in the \code{src/app/wavesim2D/F77} directory solves the
two-dimensional wave equation
\[ {\partial^2u\over\partial t^2} =
   {\partial\over\partial x}\left( \lambda (x,y){\partial u\over\partial x}\right) +
   {\partial\over\partial y}\left( \lambda (x,y){\partial u\over\partial y}\right) \]
by an explicit finite difference scheme over a uniform, rectangular grid. We can think
of this equation as modeling 2D water waves. Then $u$ is the water surface elevation,
and $\lambda (x,y)$ represents the bottom topography.
The \code{README} file in this directory contains an overview of the code files and the documentation of the involved mathematics and numerics. The finite difference scheme is coded in a separate file, using a C preprocessor macro to simplify the coding and future modifications. A script from the previous section transforms an F77 file with preprocessor directives to standard F77 code. In the subdirectory \code{versions} there are several different versions of the code, aimed at testing various high-performance computing aspects: \bit \item file writing versus pure number crunching, \item row-wise versus column-wise traversal of arrays, \item representing $\lambda$ by an array versus calling functions, \item the effect of if-tests inside long do-loops. \eit A complete implementation of the type of script explained in this section is found in the file \code{compile.py}. This script is central for testing the efficiency of the different coding techniques used in the files in the \code{versions} subdirectory. A simple Bourne shell script \code{runall.sh} calls up \code{compile.py} for the different versions of the code. This makes it trivial to test the efficiency of all versions on different platforms, compilers, and optimization flags. The \code{ranking.py} script extracts the CPU time measurements from the output of \code{runall.sh} and writes out the relevant lines in sorted order. This acts as a kind of summary of the tests. \end{document}
{ "alphanum_fraction": 0.708030491, "avg_line_length": 40.0781527531, "ext": "tex", "hexsha": "6ccdcb3c3423365335d0389afa0ab7e71c6317ae", "lang": "TeX", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2021-04-22T10:23:23.000Z", "max_forks_repo_forks_event_min_datetime": "2015-07-13T10:04:10.000Z", "max_forks_repo_head_hexsha": "e048756feca67197cf5f995afd7d75d8286e017b", "max_forks_repo_licenses": [ "BSD-2-Clause" ], "max_forks_repo_name": "sniemi/SamPy", "max_forks_repo_path": "sandbox/src1/TCSE3-3rd-examples/src/app/wavesim2D/F77/doc/scripts.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "e048756feca67197cf5f995afd7d75d8286e017b", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-2-Clause" ], "max_issues_repo_name": "sniemi/SamPy", "max_issues_repo_path": "sandbox/src1/TCSE3-3rd-examples/src/app/wavesim2D/F77/doc/scripts.tex", "max_line_length": 144, "max_stars_count": 5, "max_stars_repo_head_hexsha": "e048756feca67197cf5f995afd7d75d8286e017b", "max_stars_repo_licenses": [ "BSD-2-Clause" ], "max_stars_repo_name": "sniemi/SamPy", "max_stars_repo_path": "sandbox/src1/TCSE3-3rd-examples/src/app/wavesim2D/F77/doc/scripts.tex", "max_stars_repo_stars_event_max_datetime": "2021-04-22T10:23:12.000Z", "max_stars_repo_stars_event_min_datetime": "2016-05-28T14:12:28.000Z", "num_tokens": 6156, "size": 22564 }
\documentclass{article} \usepackage[utf8]{inputenc} \usepackage{pgfplots} \usepackage{fancyhdr} \usepackage{enumitem} \usepackage{tikz} \usepackage{xparse} \usepackage{siunitx} \pgfplotsset{width=10cm,compat=1.9} \pagestyle{fancy} \fancyhf{} \lhead{Steven Glasford} \chead{Homework 1.4} \rhead{Page \thepage} \title{Homework 1.4} \author{Steven Glasford} \date{\parbox{\linewidth}{\centering% \today\endgraf\medskip Math-451-M001}} \newcommand{\rpm}{\sbox0{$1$}\sbox2{$\scriptstyle\pm$} \raise\dimexpr(\ht0-\ht2)/2\relax\box2 } \newlist{steps}{enumerate}{1} \setlist[steps, 1]{label = Step \arabic*:} \ExplSyntaxOn \newcommand*{\prlen}[1]{% % round to 1 digit: \pgfmathparse{round(10)/10.0}% %\pgfkeys{/pgf/number format/precision=1} %\pgfmathresult \pgfmathprintnumber[fixed, precision=2]{\pgfmathresult} } \ExplSyntaxOff \begin{document} \maketitle I choose to do problems 3 and 9 from the options of 3, 4, 5 and 9. \section{Problem 3} \begin{enumerate}[label=\alph*] \item \begin{tikzpicture} \begin{axis}[ axis lines = left, xlabel = Time in $t$ day, ylabel = Price per pig $P(t)$, ] \addplot [ domain=0:15, samples=100, color=red, ]{.65 - .01*x + .00004*x^2}; \addlegendentry{New: $.65-.01t+.00004t^2$} \addplot[ domain=0:15, samples=100, color=blue, ]{.65-.01*x}; \addlegendentry{Old: $.65-.01*x$} \end{axis} \end{tikzpicture} The graph above shows the price per pig using both the old linear model (blue) and the newer model (the red line). The red line is more accurate to the actual price per pig in real life. But as is obvious in the graph the values for the old equation are very similar to the more accurate model within the appropriate time domain. \item \begin{center} \begin{tabular}{ |c|c|c| } \hline Variables & Constants & Assumptions \\ \hline $t$ - Time (Days) & $w_0 = 200$ initial weight (pounds) & $w=w_0+5t$ \\ $f$ - profit (in dollars) & & $p=.65-.01t+.00004t^2$ \\ $w$ - Weight (pounds) && $c=.45t$ \\ $p$ - Price per pound && $r=p*w$\\ $c$ - cost (in dollars) && $f=r-c$\\ $r$ - Revenue (in dollars) &&\\ \hline \end{tabular} \end{center} If we want to maximize $f(t)$ then we should try to add all of the assumptions into a single equation, this way all of the assumptions are considered. We will try to use $t$ as the isolating factor. If $$f=r-c$$ \parbox{\linewidth}{\centering% $r=pw$ \hspace*{3cm} $c=.45t$ \endgraf \bigskip $f=pw-.45t$ \endgraf\bigskip $p=.65-.01t$ \hspace*{3cm} $w=w_0+5*t$ \endgraf \bigskip $f=(.65-.01t+.00004t^2)(w_0+5t)-.45t$ \endgraf\bigskip $w_0=200$ \endgraf \bigskip $f=(.65-.01t+.00004t^2)(200+5t)-.45t$ \endgraf\bigskip } Which simplifies to:$$f(t)=.0002t^3-.042t^2+.8t+130$$ Now we need to take the derivative of $f(t)$ and find where it is equal to zero. $$\frac{df}{dt}=\frac{3t^2}{5000}-\frac{21t}{250}+\frac{4}{5}$$ \begin{tikzpicture} \begin{axis}[ axis lines = left, xlabel = Time in $t$ day, ylabel = $y$, ] \addplot[ domain=0:20, samples=100, color=blue, ]{(3*x^2-420*x+4000)/5000}; \addlegendentry{$\frac{df}{dt}$} \draw[ultra thin] (axis cs:\pgfkeysvalueof{/pgfplots/xmin},0) -- (axis cs:\pgfkeysvalueof{/pgfplots/xmax},0); \end{axis} \end{tikzpicture} \endgraf Using the quadratic equation we try to determine the extreme somewhere between 8 and 14, as that point will be a maximum, this is known since the derivative is a quadratic and that point the derivative goes from positive to negative, indicating a maxima. The other root would in turn be a minima. 
$$\frac{df}{dt}=0=\frac{3t^2-420t+4000}{5000}$$ $$t=\frac{10(21+\sqrt{321})}{3},\frac{10(21-\sqrt{321})}{3}$$ $$t \approx \pgfmathparse{(10*(21+321^(1/2)))/3}\pgfmathresult , \pgfmathparse{(10*(21-321^(1/2)))/3}\pgfmathresult $$ \textbf{Therefore, the best day to sell your pig is at about 10 days.} \item \underline{Sensitivity analysis} \endgraf $$p(t)=.65-.01t+.00004t^2$$ $$f(t)=.0002t^3-.042t^2+.8t+130$$ $$y=(.65-.01x+xt^2)(200+5x)-.45x$$ $$y=5rx^3-.05x^2+200rx^2+.8x+130=0$$ $$\frac{dy}{dx}=15rx^2+400rx-\frac{x}{10}+\frac{4}{5}=0$$ $$x=\frac{-4000r+1+ 10\sqrt{160000r^2-128r+1/100}}{300r}$$ $$\frac{dx}{dr}=\frac{-6400r+1+ \sqrt{16000000r^2-12800r+1}}{300r^2\sqrt{16000000r^2-12800r+1}}$$ \begin{center} \begin{tabular}{ |c|c|c| } \hline r & x & $\frac{\Delta x}{\Delta r} * 100$ \\ \hline .00002 & 297.709385518& %\pgfkeys{/pgf/fpu} \pgfmathparse{}\pgfmathresult %\edef\tmp{\pgfmathresult} %\pgfmathresult %\pgfkeys{/pgf/fpu=false} & %1/(-6400*.00004+1+sqrt(16000000*.00004^2-128*.00004+1))/(300*.00004^2*sqrt(16000000*.00004^2-12800*.00004+1)) %(-6400*r+1+\sqrt{16000000*r^2-128*r+1})/(300*r^2*\sqrt{16000000*r^2-12800r+1}) \\ .00003 & 185.997481072 & -1117119044.46 %\pgfmathparse{((-4000*.00003+1+10*(160000*.00003^2-128*.00003+1/100)^(1/2))/(300*.00003)-(-4000*.00002+1+10*(160000*.00002^2-128*.00002+1/100)^(1/2))/(300*.00002))/(.00003-.00002)*100}\pgfmathresult & \\ .00004 &129.721576224&-562759048.485 % \pgfmathparse{(-4000*.00004+1+10*(160000*.00004^2-128*.00004+1/100)^(1/2))/(300*.00004)}\pgfmathresult & % \pgfmathparse{(-4000*.00004+1-10*(160000*.00004^2-128*.00004+1/100)^(1/2))/(300*.00004)}\pgfmathresult & % \pgfmathparse{((-4000*.00004+1+10*(160000*.00004^2-128*.00004+1/100)^(1/2))/(300*.00004)-(-4000*.00003+1+10*(160000*.00003^2-128*.00003+1/100)^(1/2))/(300*.00003))/(.00004-.00003)*100}\pgfmathresult & % \pgfmathparse{((-4000*.00004+1-10*(160000*.00004^2-128*.00004+1/100)^(1/2))/(300*.00004)-(-4000*.00003+1-10*(160000*.00003^2-128*.00003+1/100)^(1/2))/(300*.00003))/(.00004-.00003)*100}\pgfmathresult \\ .00005 & 95.4970354689 &-342245407.55 %\pgfmathparse{(-4000*.00005+1+10*(160000*.00005^2-128*.00005+1/100)^(1/2))/(300*.00005)}\pgfmathresult & % \pgfmathparse{(-4000*.00005+1-10*(160000*.00005^2-128*.00005+1/100)^(1/2))/(300*.00005)}\pgfmathresult & % \pgfmathparse{((-4000*.00005+1+10*(160000*.00005^2-128*.00005+1/100)^(1/2))/(300*.00005)-(-4000*.00004+1+10*(160000*.00004^2-128*.00004+1/100)^(1/2))/(300*.00004))/(.00005-.00004)*100}\pgfmathresult& % \pgfmathparse{((-4000*.00005+1-10*(160000*.00005^2-128*.00005+1/100)^(1/2))/(300*.00005)-(-4000*.00004+1-10*(160000*.00004^2-128*.00004+1/100)^(1/2))/(300*.00004))/(.00005-.00004)*100}\pgfmathresult \\ .00006 &72.1191645491&-233778709.199 %\pgfmathparse{(-4000*.00006+1+10*(160000*.00006^2-128*.00006+1/100)^(1/2))/(300*.00006)}\pgfmathresult & % \pgfmathparse{(-4000*.00006+1-10*(160000*.00006^2-128*.00006+1/100)^(1/2))/(300*.00006)}\pgfmathresult & % \pgfmathparse{((-4000*.00006+1+10*(160000*.00006^2-128*.00006+1/100)^(1/2))/(300*.00006)-(-4000*.00005+1+10*(160000*.00005^2-128*.00005+1/100)^(1/2))/(300*.00005))/(.00006-.00005)*100}\pgfmathresult& % \pgfmathparse{((-4000*.00006+1-10*(160000*.00006^2-128*.00006+1/100)^(1/2))/(300*.00006)-(-4000*.00005+1-10*(160000*.00005^2-128*.00005+1/100)^(1/2))/(300*.00005))/(.00006-.00005)*100}\pgfmathresult \\ \hline \end{tabular} \end{center} \textbf{Therefore the best sort of sensitivity is somewhere between .00004 and .00005} %% \pgfmathparse{(10*(21-321^(1/2)))/3}\pgfmathresult \item The robustness is 
fairly good since the values obtained from both the linear and the quadratic models are roughly similar. \end{enumerate} \section{Problem 9} \begin{enumerate}[label=\alph*] \endgraf \item\endgraf\bigskip \begin{steps} \endgraf \item \emph{Ask the Question, determine variables, constants, assumptions}\endgraf \begin{center} \begin{tabular}{ |c|c|c|c| } \hline Variables & Constants & Assumptions \\ \hline $s$ - Subscribers & $i_0 = 1.50$ & $f=sp$\\ $p$ - Subscription price & $s_0=80000$&$s=s_0-50000(p-p_0)$\\ $f$ - Profits &&$p\geq0$\\ && $s\geq0$\\ \hline \end{tabular} \end{center} \item \emph{Select the model}\endgraf One-variable optimization \item \emph{Formulate the model} \endgraf $$f=sp$$ $$s=s_0-50000(p-p_0)$$ $$f=p(s_0-50000(p-p_0))$$ \parbox{\linewidth}{\centering% $s_0=80000$ \hspace*{3cm} $p_0=1.50$ \endgraf \bigskip } $$f=p(80000-50000(p-1.5))$$ or simplified $$f=-50000p^2+155000p$$ \item \emph{Solve the model}\endgraf Find the extrema for f: $$\frac{df}{dp}=0=-100000p+155000$$ $$p=1.55$$ Determine if 1.55 is a max or min: \begin{tikzpicture} \begin{axis}[ axis lines = left, xlabel = Price per paper, ] \addplot [ domain=0:2, samples=100, color=red, ]{-100000*x+155000}; \addlegendentry{$\frac{df}{dp}$} \draw[ultra thin] (axis cs:\pgfkeysvalueof{/pgfplots/xmin},0) -- (axis cs:\pgfkeysvalueof{/pgfplots/xmax},0); \end{axis} \end{tikzpicture} Since the derivative of $f$ is going from positive to negative at the point 1.55, \textbf{$p=1.55$ is a Maximum.}\bigskip \item \emph{Answer the question}\endgraf The best price to sell the newspaper is \$1.55 for a total of \$$120125$ per week, and a total of $77500$ subscribers. \end{steps} %% \pgfmathparse{(10*(21-321^(1/2)))/3}\pgfmathresult \item \endgraf \bigskip \emph{What if the newspaper lost a different number of subscribers, other than 5000?} $$f=p(80000-r*10(p-1.5))$$ $$\frac{df}{dp}=0=-20rp+15r+80000$$ $$p=\frac{80000+15r}{20r}$$ replace $p$ in the $f$ equation: $$f=\frac{80000+15r}{20r}\left(80000-10r\left(\frac{80000+15r}{20r}-1.5\right)\right)$$ Which doesn't really simplify down to anything worth mentioning. \begin{center} \begin{tabular}{ |c|c| } \hline Lost Subscribers ($r$) & Max Profit \\ \hline 3000 &\$130208.33\\ 4000 &\$122500.00\\ 5000 &\$120125.00\\ 6000 &\$120416.67\\ 7000 &\$122232.14\\ \hline \end{tabular} \end{center} \item $$f=\frac{80000+15r}{20r}\left(80000-10r\left(\frac{80000+15r}{20r}-1.5\right)\right)$$ $$\frac{df}{dr}=\frac{45r^2-1280000000}{8r^2}$$ Sensitivity=$\frac{df}{dr}*\frac{r}{x}=S(p,n)$ $$S(p,n)=\frac{45n^2-1280000000}{8n^2}\left(n/\left(\left(\frac{80000+15n}{20n}\left(80000-10n\left(\frac{80000+15n}{20n}-1.5\right)\right)\right)\right)\right)$$ \item \emph{The Newspaper shouldn't need to change its prices.} The price is already near the optimal rate, but the optimal rate is just \$125 more than the basic, which is basically nothing when considering the amount of profit they received. \end{enumerate} \end{document}
{ "alphanum_fraction": 0.5592927632, "avg_line_length": 44.8708487085, "ext": "tex", "hexsha": "80f797446ddced39132c651dc8e8917c7d5a26be", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "c0fa475cb08a40debda6c106e8ddf8b44dab1060", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "stevenglasford/MATH451SDSMT", "max_forks_repo_path": "HW1.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "c0fa475cb08a40debda6c106e8ddf8b44dab1060", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "stevenglasford/MATH451SDSMT", "max_issues_repo_path": "HW1.tex", "max_line_length": 333, "max_stars_count": null, "max_stars_repo_head_hexsha": "c0fa475cb08a40debda6c106e8ddf8b44dab1060", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "stevenglasford/MATH451SDSMT", "max_stars_repo_path": "HW1.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 4349, "size": 12160 }
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% HEADER %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %\documentclass[a4paper,twoside,12pt]{report} \documentclass{article} \usepackage[left=3cm,top=3cm,bottom=3cm,right=3cm,nofoot]{geometry} % Alternative Options: % Paper Size: a4paper / a5paper / b5paper / letterpaper / legalpaper / executivepaper % Duplex: oneside / twoside % Base Font Size: 10pt / 11pt / 12pt \usepackage{fullpage} %% Packages %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \usepackage[USenglish]{babel} %francais, polish, spanish, ... \usepackage[T1]{fontenc} \usepackage[ansinew]{inputenc} \usepackage{lmodern} \usepackage{ifthen} \usepackage{amsmath} \usepackage{amsthm} \usepackage{amsfonts} \usepackage{setspace} %\onehalfspacing \singlespacing %\usepackage{a4wide} %%Smaller margins = more text per page. %\usepackage{fancyhdr} %%Fancy headings %\usepackage{pst-all} \usepackage{graphicx} \usepackage{longtable} \usepackage{verbatim} \newcommand{\reg}[1]{\texttt{#1}} \renewcommand{\labelitemi}{$\triangleright$} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% DOCUMENT %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{document} \pagestyle{plain} %Now display headings: headings / fancy / ... %% Chapters %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{X-Space (release 7)} X-Space is an aerospace simulator based on X-Plane flight simulator. It is available as a plugin, and as a dedicated server (for calculating spaceflight), also as an Orbiter space simulator addon (to provide networking). \section{Features} (new features in \textbf{bold}) \begin{itemize} \item \textbf{Provides information about partial concentrations and densities of atmospheric gases} \item \textbf{Clouds visible from orbit (procedural clouds based on realtime weather)} \item \textbf{Different visual effects based on fuel used in the rocket engine} \item \textbf{Detailed engine performance simulation (variation in thrust with external pressure)} \item \textbf{Support for multiple stages/multi-part vessels} \item \textbf{Visual effects from heating (glow due to heat radiation)} \item \textbf{Earth gravitational field simulation (non-spherical gravity based on EGM96 model)} \item \textbf{Support for extra fuel tanks (or additional weights)} \item Orbital flight simulation (uses custom physics engine to override X-Plane physics at high altitudes) \item Detailed atmospheric model (including exosphere, high-altitude atmospheric drag) \item Atmospheric scattering visible from orbit (for X-Plane 9, X-Plane 10 without HDR) \item Support for any amount of custom engines (reaction control engines, additional rocket engines) \item Advanced networking support (multiplayer) \item Fuselage drag simulation (drag from capsules, hypersonic vessels) \item Heating simulation (covers a wide range of external pressures and velocities) \item Earth magnetic field simulation (fairly precise magnetic field data from WMM) \item Material simulation (simulates properties of various materials the spacecraft is constructed from) \item Extra camera views (relative to vessel reference point, vessel center of mass) \item Vertical launch pads \item Simulates planet rotation \item Publishes a large amount of variables (datarefs) accessible by other plugins \end{itemize} \twocolumn \section{Installing} \textbf{Please remove the old version of X-Space if 
it's present} Just copy contents of folders in this archive into corresponding folders in X-Plane 9 (or X-Plane 10) folder. The folders must be \textit{merged} together (not replaced!). To uninstall it just delete the following folders: \begin{itemize} \item \reg{$\backslash$X-Plane 9$\backslash$Resources$\backslash$plugins$\backslash$x-space} \item \reg{$\backslash$X-Plane 9$\backslash$Aircraft$\backslash$XSAG} \end{itemize} \section{Authorship} \begin{itemize} \item \textbf{X-Space} \copyright \ 2009-2012 by Black Phoenix \item \textbf{XGDC OS} \copyright \ 2008-2012 by Black Phoenix \item \textbf{Simple OGL Image Library} by Jonathan Dummer \item \textbf{Scriptable Avionics Simulation Library} by A.A.Babichev \item \textbf{Lua 5.1.4} \copyright \ 1994-2011 PUC-Rio \item \textbf{WMM model} implementation by Manoj C Nair and Adam Woods \item \textbf{NRLMSISE-00 model} implementation by Dominik Brodowski \item \textbf{RV-550 Ares-1} with \textbf{Orion} capsule model by Curt Boyll % \item \textbf{RV-560 Zenit} with \textbf{Orion} capsule model by Curt Boyll and Black Phoenix \item \textbf{SV-200 UHLSS} model by Jeff Scott \end{itemize} \section{Version Information} \begin{itemize} \item X-Space (release 7b) \item RV-550 Ares-1/Orion (XSAG SVN R326) % \item RV-560 Zenit/Orion (XSAG SVN R326) \item SV-200 UHLSS (XSAG SVN R326) \item XGDC OS (XSAG SVN R326) \item WMM (WMM2010) \end{itemize} \section{Version History} \subsection{Changes from release 7a} \begin{itemize} \item Fixed Mac OS version of the plugin \end{itemize} \subsection{Changes from release 6a} \begin{itemize} \item Added new rendering features \item Added non-spherical gravity \item Added configuration window \item Can use ACF files as stages now \item No longer supports weapons as drop-stages \item Rewritten from scratch internally \end{itemize} \subsection{Changes from release 5a} \begin{itemize} \item Fixed accelerometer readings when in inertial physics \item Fixed Earth rotation pushing aircraft around runway \item Added material editor (for making drag model) \item Added SV-201 vehicle (as a demonstration) \item Added on-orbit drag \end{itemize} \subsection{Changes from release 4d} \begin{itemize} \item Added multiplayer \item Fixed memory related crash when loading aircraft after loading rocket \item Fixed unresponsive controls when loading aircraft after loading rocket \item Fixed rocket start with "don't start with engines running" option \item Added datarefs to check for existance of any body \end{itemize} \section{Reference} \subsection{Extra key functions} \begin{itemize} \item \reg{SHIFT+]}: Locked-orientation spot around vehicle \item \reg{SHIFT+[}: Spot camera around vehicle \item \reg{CTRL+LEFT}: Previous object (cameras) \item \reg{CTRL+RIGHT}: Next object (cameras) \item \reg{SPACE}: Release vessel from launch pad \end{itemize} %== Menu options == %=== Configure plugin === %=== Start multiplayer session... === %=== End multiplayer session... === %=== Edit mount offsets... === %=== Edit hull... === %=== Heating simulation === %=== Materials database === \end{document}
{ "alphanum_fraction": 0.7082169631, "avg_line_length": 39.783625731, "ext": "tex", "hexsha": "12b1a81851228d5758e9ca8d58c9f0bfe4b6ff1b", "lang": "TeX", "max_forks_count": 4, "max_forks_repo_forks_event_max_datetime": "2020-11-04T04:07:52.000Z", "max_forks_repo_forks_event_min_datetime": "2016-04-22T05:41:00.000Z", "max_forks_repo_head_hexsha": "337839e7d852c4c36203eb1ac5d01277a5782f5a", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "PhoenixBlack/X-Space", "max_forks_repo_path": "docs/manual/readme.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "337839e7d852c4c36203eb1ac5d01277a5782f5a", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "PhoenixBlack/X-Space", "max_issues_repo_path": "docs/manual/readme.tex", "max_line_length": 220, "max_stars_count": 5, "max_stars_repo_head_hexsha": "337839e7d852c4c36203eb1ac5d01277a5782f5a", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "PhoenixBlack/X-Space", "max_stars_repo_path": "docs/manual/readme.tex", "max_stars_repo_stars_event_max_datetime": "2021-08-21T17:29:41.000Z", "max_stars_repo_stars_event_min_datetime": "2016-08-06T04:02:32.000Z", "num_tokens": 1763, "size": 6803 }
\section*{GDSMill} \label{sec:gdsMill_permission} OpenRAM uses gdsMill, a GDS library written by Michael Wieckowski at the University of Michigan. Michael gave us complete permission to use the code. Since then, we have made several bug and performance enhancements to gdsMill. In addition, gdsMill is no longer available on the web, so we distribute it along with OpenRAM. \begin{verbatim} From: Michael Wieckowski <[email protected]> Date: Thu, Oct 14, 2010 at 12:49 PM Subject: Re: GDS Mill To: Matthew Guthaus <[email protected]> Hi Matt, Feel free to use / modify / distribute the code as you like. -Mike On Oct 14, 2010, at 3:07 PM, Matthew Guthaus wrote: > Hi Michael (& Dennis), > > A student and I were looking at your GDS tools, but > we noticed that there is no license. What is the license? > > Thanks, > > Matt \end{verbatim}
{ "alphanum_fraction": 0.7488207547, "avg_line_length": 24.9411764706, "ext": "tex", "hexsha": "32cf68ddf90be61cb3db971d02401466be096622", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-01-23T07:12:52.000Z", "max_forks_repo_forks_event_min_datetime": "2020-01-23T07:12:52.000Z", "max_forks_repo_head_hexsha": "abf47bab50adb48337c59b72ccd6023c1999f3fc", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "panicmarvin/OpenRAM", "max_forks_repo_path": "docs/gdsmill.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "abf47bab50adb48337c59b72ccd6023c1999f3fc", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "panicmarvin/OpenRAM", "max_issues_repo_path": "docs/gdsmill.tex", "max_line_length": 70, "max_stars_count": null, "max_stars_repo_head_hexsha": "abf47bab50adb48337c59b72ccd6023c1999f3fc", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "panicmarvin/OpenRAM", "max_stars_repo_path": "docs/gdsmill.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 245, "size": 848 }
\section[Asymptotic normality]{Theoretical results supporting the asymptotically normal \oos\ statistic} \label{sec:1} This section presents the new \oos\ statistic; first we give an informal motivation of the statistic, then present the paper's key assumptions in Section~\ref{sec:1a} and present our formal theoretical results in Section~\ref{sec:1b}. Suppose for now that a researcher is interested in predicting the target variable $y_{t+1}$ with a vector of regressors $x_t$, that $v_t$ is another random process that is believed to potentially contain information about $y_{t+1}$, and that $(y_t, x_t, v_t)$ is stationary and weakly dependent. In addition, let $\btrue = (\E x_t x_t')^{-1} \E x_t y_{t+1}$ be the pseudotrue coefficient for the regression of $y_{t+1}$ on $x_t$ and define $\ep_{t+1} = y_{t+1} - x_t'\btrue$. If this linear model is correctly specified, then $\ep_{t+1}$ is an \mds\ with respect to $\sigma((x_t, v_t, y_t), (x_{t-1}, v_{t-1}, y_{t-1}),\dots)$ and we can see immediately that \begin{equation} \label{eq:1} \oclt{t} \ep_{t+1} (v_t - x_t'\btrue) \end{equation} obeys an \mds\ \clt\ and is asymptotically normal as $P \to \infty$,% \footnote{This claim assumes that the asymptotic variance of the sample average is uniformly positive, a requirement that we will address in Section~\ref{sec:1b}.} % with $R$ an arbitrary starting value and $P = T - R$. Straightforward algebra \citep{ClW:07} shows that \begin{equation} \label{eq:2} \tfrac{1}{\sqrt{P}} \osum{t} \ep_{t+1} (v_t - x_t'\btrue) = \tfrac{1}{2 \sqrt{P}} \osum{t} \Big[(y_{t+1} - x_t\btrue)^2 - (y_{t+1} - v_t)^2 + (x_t'\btrue - v_t)^2 \Big] \end{equation} almost surely. \citet{ClW:06,ClW:07} base their \oos\ statistics on the \allcaps{RHS} of Equation~\eqref{eq:2}, but use a second forecast of $y_{t+1}$ as $v_t$. (Call it $\yh_{t+1}$.) They use a rolling window of length $R$ to estimate $\yh_{t+1}$,% \footnote{Making $\yh_{t+1}$ a function of $y_t, x_{t-1}, z_{t-1} \dots, y_{t-R+1}, x_{t-R}$ and $z_{t-R}$, where $z_t$ is another weakly dependent random process} % and $R$ is kept finite as $T \to \infty$ so that $\yh_{t+1}$ inherits the weak dependence properties of the variables used to estimate it. Using a finite window prevents the degeneracy that can arise when comparing nested models out-of-sample (see \citealp{ClM:01}, and \citealp{Mcc:07}), so the conditional variance of the \oos\ average remains positive and the average obeys a \clt.% \footnote{This approach was first introduced by \citet{GiW:06}.} % \citet{ClW:06,ClW:07} propose using this as a test of whether the benchmark is correctly specified. In their 2006 paper, Clark and West assume that the coefficients on the benchmark model, $\btrue$, are zero under the null, making $\ep_{t+1}$ observed directly. This restriction is relaxed in their 2007 paper, where $\btrue$ is unknown and estimated with the same length-$R$ rolling window as $\yh_{t+1}$. Now the estimated linear model's prediction errors, $\eph_{t+1}$, replace $\ep_{t+1}$ in the \oos\ test statistic. Unfortunately, $\eph_{t+1}$ is not an \mds\ even when $\ep_{t+1}$ is, so the statistic is no longer asymptotically mean-zero normal, even though this approximation performs well in simulations. Since the window length is finite, the estimator of $\btrue$ does not converge to $\btrue$. 
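Before turning to our modification, it is worth recording the short algebra behind
Equation~\eqref{eq:2}. Writing $a = y_{t+1} - x_t'\btrue$ and $b = y_{t+1} - v_t$, so
that $v_t - x_t'\btrue = a - b$ and $x_t'\btrue - v_t = b - a$, we have
\begin{equation*}
\tfrac{1}{2}\big[a^2 - b^2 + (b - a)^2\big]
  = \tfrac{1}{2}\big[2a^2 - 2ab\big]
  = a(a - b)
  = \ep_{t+1}(v_t - x_t'\btrue),
\end{equation*}
which is exactly the identity used in Equation~\eqref{eq:2}.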
This paper proposes using the same basic \oos\ statistic, but using a recursive window to estimate $\btrue$ and produce $\eph_{t+1}$: \begin{align} \label{eq:3} \bh_t &= \Big(\sum_{s=1}^{t-1} x_{s} x_{s}'\Big)^{-1} \sum_{s=1}^{t-1} x_{s} y_{s+1} && \text{and} & \eph_{t+1} &= y_{t+1} - x_t'\bh_t \end{align} for each $t$.% \footnote{The matrix inversion in $\bh_t$ can be replaced with a pseudo-inverse if necessary for some values of $t$ without changing the forecast.} % \citepos{Wes:96} Theorem 4.1 implies that \begin{equation*} \oclt{t} \Big[(y_{t+1} - x_t\bh_t)^2 - (y_{t+1} - v_t)^2 + (x_t'\bh_t - v_t)^2 \Big] \end{equation*} is asymptotically normal with mean zero under Clark and West's \mds\ null for fairly arbitrary processes $v_t$, as long as $v_t$ is weakly dependent and the \oos\ statistic has uniformly positive variance. Just as in \citet{ClW:06,ClW:07}, these conditions are ensured if $v_t$ is another forecast of $y_{t+1}$ based on a fixed-length rolling window. So far, we have presented an especially simple version of the result to make the intuition as clear as possible. The next section lists the specific assumptions for the more general case and defines additional notation. \subsection{Theoretical assumptions} \label{sec:1a} Consider the following environment. There is a single linear benchmark model of the target variable, $y_{t+1}$: \begin{equation}\label{eq:4} y_{t+1} = x_t'\beta + \ep_{t+1}, \quad t = 1,\dots,T-1 \end{equation} where $\beta$ is an unknown vector of parameters and $x_t$ is an observed vector of predictors. The parameter $\beta$ is estimated with \ols\ using a recursive window as described by Equation~\eqref{eq:3}. The alternative model is denoted $\yh_{t+1}$ and is estimated with a rolling window of length $R$. The main conditions on the \dgp\ are summarized in the first assumption. The weak dependence and moment conditions are standard. The assumption of strict stationarity is stronger than necessary in practice --- once the alternative forecasting method is known, it is only necessary that the \oos\ adjusted loss difference be weak stationary, and even that can be relaxed further --- but this stronger assumption ensures that the results hold generally. \phantomsection \addcontentsline{toc}{subsubsection}{Assumption \ref{a1}} \begin{asmp}\label{a1}% The data are generated by the relationship \begin{equation} y_{t+1} = x_t'\btrue + \ep_{t+1} \end{equation} for $t=1,2,\dots$, for some value $\btrue$, with $\E x_t \ep_{t+1} = 0$, $\E \ep_{t+1}^2 > 0$, and $\E x_t x_t'$ positive definite for all $t$. Also assume that there is an additional sequence of random vectors $z_t$ and the process $(\ep_{t+1}, x_t, z_t)$ is stationary and strong mixing of size $-r/(r-2)$ or uniform mixing of size $-r/(2r-2)$, for $r > 2$. \end{asmp} The next assumption defines the forecasting models and adds additional constraints to the \dgp. \phantomsection \addcontentsline{toc}{subsubsection}{Assumption \ref{a3}} \begin{asmp}\label{a3}% The benchmark forecast is $x_t'\bh_t$, where $\bh_t$ is constructed with a recursive window according to~\eqref{eq:3}. The alternative forecast satisfies \begin{equation} \yh_{t+1} = \psi(y_t,z_t,\dots,y_{t-R+1}, z_{t-R+1}) \end{equation} where $\psi$ is a known measurable function and the window length, $R$, remains finite as $T \to \infty$. Moreover, the vector $(\ep_{t+1}, x_t, \yh_{t+1})$ has uniformly bounded $2 r$ moments where $r$ is first defined in Assumption~\ref{a1}. 
\end{asmp} The requirement that the alternative forecast satisfies moment conditions, rather than the underlying predictors $z_t$, is somewhat unappealing but necessary. The function $\psi$ that generates these forecasts is otherwise nearly unrestricted, so even well-behaved predictors could produce arbitrarily badly-behaved forecasts. For example, if \begin{equation*} z_t \sim \iid~\bernoulli(1/2), \end{equation*} setting $\psi(y_t, z_t) = 1/z_t$ would prevent a \clt\ from holding since the forecast equals positive infinity with probability $1/2$. It is easy to construct less obvious examples of problematic functions as well. Assumption~\ref{a3} implicitly rules out these functional forms by imposing moment conditions on the alternative models' forecasts. Our next assumption ensures that the asymptotic variance of the \oos\ average is positive. \phantomsection \addcontentsline{toc}{subsubsection}{Assumption \ref{a4}} \begin{asmp}\label{a4}% The asymptotic variance-covariance matrix \begin{equation} \var \Bigg( \oclt{t} \begin{pmatrix} x_t \\ \yh_{t+1} \end{pmatrix} \ep_{t+1} \Bigg) \end{equation} is uniformly positive definite (in $T$). \end{asmp} This assumption is much less restrictive than in \cite{Wes:96}. As in \cite{GiW:06} and \citet{ClW:06,ClW:07}, the assumption only serves to rule out pathological cases --- for example, letting the alternative model consist of only the first regressor of the benchmark. In \citet{Wes:96}, this assumption is a restriction on the \dgp\ as well as the forecasting models, but in this paper it is a restriction only on the models. The final assumption restricts the class of \hac\ variance estimators we will consider. We use the same class of estimators studied by \citet{JoD:00} (their class $\mathcal{K}$); see their paper for further discussion. \phantomsection \addcontentsline{toc}{subsubsection}{Assumption \ref{a5}} \begin{asmp}\label{a5}% The kernel $K$ is a function from $\Re$ to $[-1,1]$ such that $K(0) = 1$, $K(x) = K(-x)$ for all $x$, $K(\cdot)$ is continuous at zero and all but a finite number of points, and \begin{gather*} \int_{-\infty}^{\infty} \lvert K(x) \rvert\, dx < \infty, \intertext{and} \int_{-\infty}^{\infty} \Bigg\lvert \int_{-\infty}^{\infty} K(z) e^{ixz}\,dz \Bigg\rvert\, dx < \infty. \end{gather*} \end{asmp} Last, we define some notation that will be used to derive the theoretical properties of our \oos\ statistics. The information set that contains the information available for forecasting $y_{t+1}$ is \begin{equation*} \Fs_t = \sigma(y_t, x_t, z_t, y_{t-1}, x_{t-1}, z_{t-1},\dots). \end{equation*} The adjusted \oos\ loss difference using a hypothetical value of $\beta$ to produce the benchmark forecast is denoted by \begin{equation*} f_t(\beta) = (y_{t+1} - x_t'\beta)^2 - (y_{t+1} - \yh_{t+1})^2 + (x_t'\beta - \yh_{t+1})^2. \end{equation*} Define the additional terms $\fh_t = f_t(\bh_t)$, $f_t = f_t(\btrue)$, \begin{gather*} \gh_t = 2 \Bigg[\oavg{s} (x_s'\bh_s - \yh_{s+1}) x_s'\Bigg]\, \Bigg[\tfrac{1}{T-1} \sum_{s=1}^{T-1} x_s x_s'\Bigg]^{-1} x_t \eph_{t+1} \intertext{and} g_t = 2 \E\Big[(x_t'\btrue - \yh_{t+1}) x_t'\Big] \, (\E x_t x_t')^{-1} x_t \ep_{t+1} \end{gather*} and the \oos\ averages $\fb = \osum{t} \fh_t/P$, $\fb^* = \osum{t} f_t/P$, $\gb = \osum{t} \gh_t/P$, and $\gb^* = \osum{t} g_t/P$. \subsection{Theoretical results} \label{sec:1b} Asymptotic normality of the \oos\ average now follows directly from the first three assumptions without other conditions. 
The proof is presented in the Appendix and follows \citet{Wes:96} closely.

\phantomsection
\addcontentsline{toc}{subsubsection}{Theorem \ref{res:1}}
\begin{thm}\label{res:1}
If Assumptions~\ref{a1}--\ref{a4} hold then
\begin{equation*}
\sqrt{P} (\fb - \E \fb^*) \to^d N(0, \sigma^2),
\end{equation*}
with $\sigma^2 = s_1 + 2(s_2 + s_3)$ and
\begin{align*}
s_1 &= \lim \var(\sqrt{P}\, \fb^*), &
s_2 &= \lim \cov(\sqrt{P}\, \fb^*, \sqrt{P}\, \gb^*), &
s_3 &= \lim \var(\sqrt{P}\, \gb^*).
\end{align*}
\end{thm}

To use this result, we need a consistent estimator of $\sigma^2$. Define the \hac\
covariance estimator $\sigmah^2_1 = \sh_{11} + 2 (\sh_{12} + \sh_{13})$ and the \mds\
covariance estimator $\sigmah^2_2 = \sh_{21} + 2(\sh_{22} + \sh_{23})$ with
\begin{align*}
\sh_{11} &= \oavg{s,t} (\fh_s - \fb) (\fh_t - \fb) K(\tfrac{t-s}{P}), &
\sh_{21} &= \oavg{t} (\fh_t - \fb)^2, \\
\sh_{12} &= \oavg{s,t} (\fh_s - \fb)(\gh_t - \gb) K(\tfrac{t-s}{P}), &
\sh_{22} &= \oavg{t} (\fh_t - \fb)(\gh_t - \gb),
\intertext{and}
\sh_{13} &= \oavg{s,t} (\gh_s - \gb) (\gh_t - \gb) K(\tfrac{t-s}{P}), &
\sh_{23} &= \oavg{t} (\gh_t - \gb)^2.
\end{align*}
These estimators are consistent under assumptions similar to those of Theorem~\ref{res:1}.

\phantomsection
\addcontentsline{toc}{subsubsection}{Lemma \ref{lem:2}}
\begin{lem}\label{lem:2}
If Assumptions~\ref{a1}--\ref{a5} hold then
\begin{equation*}
\sigmah_1^2 \to^p \sigma^2.
\end{equation*}
If Assumptions~\ref{a1}--\ref{a4} hold and $\{\varepsilon_{t}, \Fs_t\}$ is an \mds\ then
\begin{equation*}
\sigmah_2^2 \to^p \sigma^2.
\end{equation*}
\end{lem}

Note that these results allow misspecification; asymptotic normality follows from the
weak dependence of the underlying series and from the design of the test statistic.
These statistics have typically been used to test the null hypothesis that the benchmark
model is correctly specified --- that $\{\ep_t, \Fs_t\}$ is an \mds\ --- which implies
that $f_t$ is an \mds\ as discussed at the beginning of this section. This is especially
appealing in our framework, since the benchmark can be theoretically motivated, so the
\mds\ null would be a test of rationality. For example, \citet{GoW:08} test whether
excess returns for the S\&P 500 are predictable out-of-sample, and any deviation of
$\ep_{t+1}$ from an \mds\ is potentially interesting. But the \mds\ null hypothesis only
affects the estimator of $\sigma^2$ (see Lemma~\ref{lem:2}); Theorem~\ref{res:1}
continues to hold under any \dgp\ that satisfies Assumptions~\ref{a1}--\ref{a4}.

In other settings, a researcher may want to test the weaker hypothesis that
$\E \fb^* = 0$ but the benchmark may be misspecified. Our statistic can then be
interpreted as an encompassing test as in \citet{HLN:98}, and would test whether the
alternative model contains additional information that could make the benchmark model
more accurate. This interpretation can be motivated by the combination forecasting model
\begin{equation*}
\yh_{\mathit{avg},t+1} = (1 - w) x_t'\btrue + w \yh_{t+1}
\end{equation*}
which can be rewritten in terms of forecast errors as
\[ y_{t+1} - \yh_{\mathit{avg},t+1} = \ep_{t+1} + w (x_t'\btrue - \yh_{t+1}). \]
The value
\[ w = \frac{\E \ep_{t+1} (\yh_{t+1} - x_t'\btrue)}{\E (x_t'\btrue - \yh_{t+1})^2} \]
minimizes the \mse\ of the combination forecast, so the combination model will have
smaller \mse\ than the benchmark model, implying that the alternative uses information
not in the benchmark, unless $\ep_{t+1}$ and $\yh_{t+1} - x_t'\btrue$ are uncorrelated.
This correlation is exactly the quantity measured by our statistic. The final result puts together Theorem~\ref{res:1} and Lemma~\ref{lem:2} to produce our test statistics. The null hypothesis under misspecification is written in terms of $\E \ep_{t+1} \yh_{t+1}$ and not $\E \ep_{t+1} (\yh_{t+1} - x_t'\btrue)$, since $\E \ep_{t+1} x_t = 0$ by construction. This result is an immediate consequence of the previous two results and its proof is omitted. \phantomsection \addcontentsline{toc}{subsubsection}{Theorem \ref{thm:3}} \begin{thm}\label{thm:3} If Assumptions~\ref{a1}--\ref{a5} hold, then \begin{equation*} \sqrt{P} \fb / \sigmah_1 \to^d N(0, 1) \end{equation*} under the null hypothesis $\E(\varepsilon_{t+1} \yh_{t+1}) = 0$ for all $t = R,\dots,T-1$. If, instead, Assumptions~\ref{a1}--\ref{a4} hold, then \begin{equation*} \sqrt{P} \fb / \sigmah_2 \to^d N(0, 1) \end{equation*} under the null hypothesis that $\{\varepsilon_t, \Fs_t\}$ is an \mds. \end{thm} The test statistic proposed in Theorem~\ref{thm:3} can be easily extended in several ways. For longer-horizon forecasts (two or more periods ahead), $\sigmah_1$ will remain consistent but $\sigmah_2$ will not --- the forecast errors for a correctly specified $h$-step-ahead forecast have an MA($h-1$) dependence structure --- but using a generalized $\sigmah_2$ that reflects this covariance structure restores consistency. To test optimality under loss functions other than squared-error, one can replace the forecast error with the generalized forecast error \citep[see, for example][]{PaT:07,PaT:07b} and replace the \ols\ estimator of $\beta$ with the corresponding $M$-estimator. And the benchmark model can be replaced in general with a nonlinear model that satisfies the assumptions of \citet{Wes:96} or \citet{Mcc:00} by making the appropriate changes to $f_t$ and $g_t$. (See \citealp{Wes:96}, and \citealp{Mcc:00}, for details.) The general approach of using a recursive window to estimate the benchmark and a fixed-length rolling window to estimate the alternative applies quite broadly. %%% Local Variables: %%% mode: latex %%% TeX-master: "mixedwindow" %%% TeX-command-extra-options: "-shell-escape" %%% End:
{ "alphanum_fraction": 0.7065544567, "avg_line_length": 44.6, "ext": "tex", "hexsha": "ff51e9d17743d98924ed24f332bc344ddab562c4", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "3b25a5acad1da570bcd72806e6c32fbf9c54845d", "max_forks_repo_licenses": [ "MIT", "Unlicense" ], "max_forks_repo_name": "grayclhn-econ/mixedwindow", "max_forks_repo_path": "S2-normality.tex", "max_issues_count": 14, "max_issues_repo_head_hexsha": "3b25a5acad1da570bcd72806e6c32fbf9c54845d", "max_issues_repo_issues_event_max_datetime": "2016-02-08T21:21:38.000Z", "max_issues_repo_issues_event_min_datetime": "2015-01-07T16:44:10.000Z", "max_issues_repo_licenses": [ "MIT", "Unlicense" ], "max_issues_repo_name": "grayclhn-econ/mixedwindow", "max_issues_repo_path": "S2-normality.tex", "max_line_length": 104, "max_stars_count": null, "max_stars_repo_head_hexsha": "3b25a5acad1da570bcd72806e6c32fbf9c54845d", "max_stars_repo_licenses": [ "MIT", "Unlicense" ], "max_stars_repo_name": "grayclhn-econ/mixedwindow", "max_stars_repo_path": "S2-normality.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 5423, "size": 16279 }
\subsubsection{\lst{BigInt.toByte} method (Code 106.1)}
\label{sec:type:BigInt:toByte}
\noindent
\begin{tabularx}{\textwidth}{| l | X |}
\hline
\bf{Description} & Converts this numeric value to \lst{Byte}, throwing an exception if it overflows. \\
\hline
\bf{Parameters} & \(\begin{array}{l l l} \end{array}\) \\
\hline
\bf{Result} & \lst{Byte} \\
\hline
\bf{Serialized as} & \hyperref[sec:serialization:operation:PropertyCall]{\lst{PropertyCall}} \\
\hline
\end{tabularx}

\subsubsection{\lst{BigInt.modQ} method (Code 6.1)}
\label{sec:type:BigInt:modQ}
\noindent
\begin{tabularx}{\textwidth}{| l | X |}
\hline
\bf{Description} & Returns this \lst{mod} Q, i.e. the remainder of division by Q, where Q is the order of the cryptographic group. \\
\hline
\bf{Parameters} & \(\begin{array}{l l l} \end{array}\) \\
\hline
\bf{Result} & \lst{BigInt} \\
\hline
\bf{Serialized as} & \hyperref[sec:serialization:operation:ModQ]{\lst{ModQ}} \\
\hline
\end{tabularx}

\subsubsection{\lst{BigInt.toShort} method (Code 106.2)}
\label{sec:type:BigInt:toShort}
\noindent
\begin{tabularx}{\textwidth}{| l | X |}
\hline
\bf{Description} & Converts this numeric value to \lst{Short}, throwing an exception if it overflows. \\
\hline
\bf{Parameters} & \(\begin{array}{l l l} \end{array}\) \\
\hline
\bf{Result} & \lst{Short} \\
\hline
\bf{Serialized as} & \hyperref[sec:serialization:operation:PropertyCall]{\lst{PropertyCall}} \\
\hline
\end{tabularx}

\subsubsection{\lst{BigInt.plusModQ} method (Code 6.2)}
\label{sec:type:BigInt:plusModQ}
\noindent
\begin{tabularx}{\textwidth}{| l | X |}
\hline
\bf{Description} & Adds this number to \lst{other} modulo Q. \\
\hline
\bf{Parameters} & \(\begin{array}{l l l}
\lst{other} & \lst{: BigInt} & \text{// Number to add to this.} \\
\end{array}\) \\
\hline
\bf{Result} & \lst{BigInt} \\
\hline
\bf{Serialized as} & \hyperref[sec:serialization:operation:PlusModQ]{\lst{PlusModQ}} \\
\hline
\end{tabularx}

\subsubsection{\lst{BigInt.toInt} method (Code 106.3)}
\label{sec:type:BigInt:toInt}
\noindent
\begin{tabularx}{\textwidth}{| l | X |}
\hline
\bf{Description} & Converts this numeric value to \lst{Int}, throwing an exception if it overflows. \\
\hline
\bf{Parameters} & \(\begin{array}{l l l} \end{array}\) \\
\hline
\bf{Result} & \lst{Int} \\
\hline
\bf{Serialized as} & \hyperref[sec:serialization:operation:PropertyCall]{\lst{PropertyCall}} \\
\hline
\end{tabularx}

\subsubsection{\lst{BigInt.minusModQ} method (Code 6.3)}
\label{sec:type:BigInt:minusModQ}
\noindent
\begin{tabularx}{\textwidth}{| l | X |}
\hline
\bf{Description} & Subtracts \lst{other} from this number modulo Q. \\
\hline
\bf{Parameters} & \(\begin{array}{l l l}
\lst{other} & \lst{: BigInt} & \text{// Number to subtract from this.} \\
\end{array}\) \\
\hline
\bf{Result} & \lst{BigInt} \\
\hline
\bf{Serialized as} & \hyperref[sec:serialization:operation:MinusModQ]{\lst{MinusModQ}} \\
\hline
\end{tabularx}

\subsubsection{\lst{BigInt.toLong} method (Code 106.4)}
\label{sec:type:BigInt:toLong}
\noindent
\begin{tabularx}{\textwidth}{| l | X |}
\hline
\bf{Description} & Converts this numeric value to \lst{Long}, throwing an exception if it overflows. \\
\hline
\bf{Parameters} & \(\begin{array}{l l l} \end{array}\) \\
\hline
\bf{Result} & \lst{Long} \\
\hline
\bf{Serialized as} & \hyperref[sec:serialization:operation:PropertyCall]{\lst{PropertyCall}} \\
\hline
\end{tabularx}

\subsubsection{\lst{BigInt.multModQ} method (Code 6.4)}
\label{sec:type:BigInt:multModQ}
\noindent
\begin{tabularx}{\textwidth}{| l | X |}
\hline
\bf{Description} & Multiplies this number by \lst{other} modulo Q. \\
\hline
\bf{Parameters} & \(\begin{array}{l l l}
\lst{other} & \lst{: BigInt} & \text{// Number to multiply with this.} \\
\end{array}\) \\
\hline
\bf{Result} & \lst{BigInt} \\
\hline
\bf{Serialized as} & \hyperref[sec:serialization:operation:MethodCall]{\lst{MethodCall}} \\
\hline
\end{tabularx}

\subsubsection{\lst{BigInt.toBigInt} method (Code 106.5)}
\label{sec:type:BigInt:toBigInt}
\noindent
\begin{tabularx}{\textwidth}{| l | X |}
\hline
\bf{Description} & Converts this numeric value to \lst{BigInt}. \\
\hline
\bf{Parameters} & \(\begin{array}{l l l} \end{array}\) \\
\hline
\bf{Result} & \lst{BigInt} \\
\hline
\bf{Serialized as} & \hyperref[sec:serialization:operation:PropertyCall]{\lst{PropertyCall}} \\
\hline
\end{tabularx}

\subsubsection{\lst{BigInt.toBytes} method (Code 106.6)}
\label{sec:type:BigInt:toBytes}
\noindent
\begin{tabularx}{\textwidth}{| l | X |}
\hline
\bf{Description} & Returns a big-endian representation of this numeric value in a collection of bytes. For example, the \lst{Int} value \lst{0x12131415} would yield the collection of bytes \lst{[0x12, 0x13, 0x14, 0x15]}. \\
\hline
\bf{Parameters} & \(\begin{array}{l l l} \end{array}\) \\
\hline
\bf{Result} & \lst{Coll[Byte]} \\
\hline
\bf{Serialized as} & \hyperref[sec:serialization:operation:PropertyCall]{\lst{PropertyCall}} \\
\hline
\end{tabularx}

\subsubsection{\lst{BigInt.toBits} method (Code 106.7)}
\label{sec:type:BigInt:toBits}
\noindent
\begin{tabularx}{\textwidth}{| l | X |}
\hline
\bf{Description} & Returns a big-endian representation of this numeric value in a collection of Booleans. Each Boolean corresponds to one bit. \\
\hline
\bf{Parameters} & \(\begin{array}{l l l} \end{array}\) \\
\hline
\bf{Result} & \lst{Coll[Boolean]} \\
\hline
\bf{Serialized as} & \hyperref[sec:serialization:operation:PropertyCall]{\lst{PropertyCall}} \\
\hline
\end{tabularx}
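The intended semantics of the conversion and modular-arithmetic methods above can be illustrated with a rough Python model. This is only a sketch for the reader, not the sigmastate implementation: the group order Q is left as a parameter and the function names are illustrative.

\begin{verbatim}
def to_bytes_big_endian(value, num_bytes):
    # Big-endian byte representation, e.g. 0x12131415 -> [0x12, 0x13, 0x14, 0x15]
    return list(value.to_bytes(num_bytes, byteorder="big"))

def to_byte(value):
    # Overflow-checked narrowing conversion, as in BigInt.toByte
    if not -128 <= value <= 127:
        raise OverflowError("value does not fit in a Byte")
    return value

def mod_q(value, Q):
    # BigInt.modQ: remainder of division by the group order Q
    return value % Q

def plus_mod_q(a, b, Q):
    # BigInt.plusModQ: addition modulo Q
    return (a + b) % Q

print(to_bytes_big_endian(0x12131415, 4))  # [18, 19, 20, 21], i.e. 0x12 0x13 0x14 0x15
\end{verbatim}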
\section{Result}

\subsection{Distance Between the Two Laser Devices}

\begin{table}[H]
\centering
\begin{tabular}{|l|c|c|c|}
\hline
\multicolumn{4}{|c|}{distance $S$ [mm] $\pm$ 1 [mm]} \\
\hline
S & Ruler Start Point & Ruler End Point & Calculated Length \\
\hline
$S_1$ & 30.0 & 177.0 & 147.0 \\
\hline
$S_2$ & 60.0 & 207.0 & 147.0 \\
\hline
$S_3$ & 40.0 & 186.5 & 146.5 \\
\hline
\end{tabular}
\caption{Distance measurement data.}
\end{table}

Then we can find
$$ \bar{S} = \frac{1}{3} \sum_{k=1}^{3} S_k = 146.8\,\mathrm{mm} \pm 1\,\mathrm{mm} $$
$$ u_{S,r} = 0.68\,\% $$

\subsection{Time Measurement}

\begin{table}[H]
\centering
\begin{tabular}{|l|c|}
\hline
\multicolumn{2}{|c|}{time $t$ [s] $\pm$ 0.01 [s]} \\
\hline
$t_1$ & 6.75 \\
\hline
$t_2$ & 6.82 \\
\hline
$t_3$ & 6.91 \\
\hline
$t_4$ & 6.88 \\
\hline
$t_5$ & 6.91 \\
\hline
$t_6$ & 6.84 \\
\hline
\end{tabular}
\caption{Time measurement data.}
\end{table}

Then we can find
$$ \bar{t} = \frac{1}{6} \sum_{k=1}^{6} t_k = 6.85\,\mathrm{s} \pm 0.01\,\mathrm{s} $$
$$ u_{t,r} = 0.15\,\% $$

\subsection{The Diameters of The Balls}

The initial reading of the meter is 0.38 mm. Thus, 0.38 mm must first be subtracted from the raw measurements; the corrected data are presented as follows.

\begin{table}[H]
\centering
\begin{tabular}{|p{2cm}|p{3cm}||p{2cm}|p{3cm}|}
\hline
\multicolumn{4}{|c|}{diameter $d$ [mm] $\pm$ 0.005 [mm]} \\
\hline
$d_1$ & 1.995 & $d_6$ & 1.995 \\
\hline
$d_2$ & 1.995 & $d_7$ & 2.000 \\
\hline
$d_3$ & 2.000 & $d_8$ & 1.800 \\
\hline
$d_4$ & 1.995 & $d_9$ & 1.995 \\
\hline
$d_5$ & 2.000 & $d_{10}$ & 1.995 \\
\hline
\end{tabular}
\caption{Ball diameter measurement data.}
\end{table}

Then we can find
$$ \bar{d} = \frac{1}{10} \sum_{k=1}^{10} d_k = 1.977\,\mathrm{mm} \pm 0.005\,\mathrm{mm} $$
$$ u_{d,r} = 0.25\,\% $$

\subsection{The Inner Diameter of The Flask}

\begin{table}[H]
\centering
\begin{tabular}{|p{1cm}|c|}
\hline
\multicolumn{2}{|c|}{diameter $D$ [mm] $\pm$ 0.02 [mm]} \\
\hline
$D_1$ & 61.40 \\
\hline
$D_2$ & 61.46 \\
\hline
$D_3$ & 61.20 \\
\hline
$D_4$ & 61.36 \\
\hline
$D_5$ & 61.20 \\
\hline
$D_6$ & 61.50 \\
\hline
\end{tabular}
\caption{Flask diameter measurement data.}
\end{table}

Then we can find
$$ \bar{D} = \frac{1}{6} \sum_{k=1}^{6} D_k = 61.3533\,\mathrm{mm} \pm 0.02\,\mathrm{mm} $$
$$ u_{D,r} = 0.03\,\% $$

\subsection{Other Physical Quantities}

\begin{table}[H]
\centering
\begin{tabular}{|c|c|}
\hline
density of the castor oil $ \rho_1\ [\mathrm{g/cm^3}] \pm 0.001\ [\mathrm{g/cm^3}] $ & 0.955 \\
\hline
mass of 40 metal balls $ m\ [\mathrm{g}] \pm 0.001\ [\mathrm{g}] $ & 1.357 \\
\hline
temperature in the lab $ T\ [^\circ\mathrm{C}] \pm 2\ [^\circ\mathrm{C}] $ & 25 \\
\hline
acceleration due to gravity in the lab $ g\ [\mathrm{m/s^2}] $ & 9.794 \\
\hline
\end{tabular}
\caption{Other physical quantities measurement.}
\end{table}

\subsection{Calculation of Density of One Ball}

The mass of one metal ball can be calculated as
$$ m_0 = \frac{m}{40} = \frac{1.357 \times 10^{-3}}{40} = 3.3925 \times 10^{-5}\,\mathrm{kg} \pm 2.500 \times 10^{-8}\,\mathrm{kg} $$
We can then obtain the density,
$$ \rho_2 = \frac{m_0}{\frac{1}{6} \pi d^3} = \frac{3.3925 \times 10^{-5}}{\frac{1}{6} \times 3.141593 \times (1.977 \times 10^{-3})^3} = 8.385 \times 10^3\,\mathrm{kg/m^3} \pm 5.756 \times 10\,\mathrm{kg/m^3} $$
$$ u_{\rho_2,r} = 0.6875\,\% $$

\subsection{Calculation For The Viscosity Coefficient}

From the last equation in the introduction,
\begin{multline*}
\mu = \frac{2 g R^2 (\rho_2 - \rho_1) t}{9 s \left(1 + 2.4 \frac{R}{R_c}\right)}
= \frac{2 \times (1.977 \times 10^{-3} \times \frac{1}{2})^2 \times (8.385 \times 10^3 - 0.595 \times 10^3) \times 9.974 \times 6.85}{9 \times 146.8 \times 10^{-3} \times (1 + 2.4 \times \frac{1.977}{61.3533})} \\
= 0.7307\,\mathrm{Pa \cdot s} \pm 6.8329 \times 10^{-3}\,\mathrm{Pa \cdot s}
\end{multline*}
$$ u_{\mu,r} = 0.93512\,\% $$
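The final calculation can also be scripted. The sketch below simply encodes the formula above using the mean values from the tables; it is an illustration added for clarity and is not part of the original lab procedure.

\begin{verbatim}
import math

# Mean measured values from the tables above (SI units)
S        = 146.8e-3       # distance between the two laser devices [m]
t        = 6.85           # mean falling time [s]
d        = 1.977e-3       # mean ball diameter [m]
D        = 61.3533e-3     # inner diameter of the flask [m]
rho_oil  = 0.955e3        # density of castor oil [kg/m^3]
m0       = 1.357e-3 / 40  # mass of one metal ball [kg]
g        = 9.794          # local gravitational acceleration [m/s^2]

R = d / 2
rho_ball = m0 / (math.pi * d**3 / 6)   # density of one ball [kg/m^3]

# Stokes' law with the wall-correction factor in the denominator
mu = 2 * g * R**2 * (rho_ball - rho_oil) * t / (9 * S * (1 + 2.4 * R / D))
print(rho_ball, mu)
\end{verbatim}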
\documentclass[12pt, a4paper,titlepage]{article} \usepackage{appendix, pdfpages, float} \usepackage{fullpage,graphicx,psfrag,amsmath,amsfonts,verbatim} %use this package to set linespacing as desired \usepackage{setspace} \singlespacing \usepackage{subcaption} \usepackage[euler]{textgreek} \usepackage{titlesec} \usepackage{tabularx} %\usepackage[dvipsnames]{xcolor} \usepackage{indentfirst} \usepackage{multirow} \titleformat {\subsection} {\normalfont\large\itshape} {\thesubsection.}{0.5ex}{} \titleformat {\paragraph}[runin] {\normalfont} {}{0.5ex}{} \graphicspath{{./images/}} % Bibliography %reference manager \usepackage[backend=bibtex, sorting=none, style=numeric-comp, bibstyle=ieee]{biblatex} \bibliography{biblio} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{titlepage} \title{Dynamic Traffic Lights Management System Based On Traffic Flow Simulation With Performance Comparison of Various Smart Traffic Lights Technologies \vspace{1cm}\\Final Report} \author{Marcos Tulio Fermin Lopez\\Department of Electrical Engineering\\The City College of New York \vspace{2cm}\\Presented to Prof. Mohamed Ali} \date{Fall 2021} \end{titlepage} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{document} \maketitle \newpage \tableofcontents \newpage %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Introduction} \label{sec_intro} Transportation Engineering studies the deployment of new technologies for the safe design, construction, and maintenance of transportation infrastructure. Some of these technologies implement novel ways to manage traffic, congestion, and urban mobility, with the help of artificial intelligence, computer vision, intelligent antennas, high-speed cameras, cloud computing, and IoT devices for real-time supervision. For example, as new ideas develop, traffic lights controllers are transitioning from static to dynamic systems. In general, a Dynamic Traffic Lights Controller is a system that adapts its timing according to the variations of traffic flow conditions instead of relying on a preprogrammed routine. In recent years government agencies and corporations in the Transportation Engineering field have dedicated resources to research methods for improving aging traffic lights systems to reduce the time drivers spend at intersections and promote smoother traffic flow. However, testing dynamic traffic lights controller designs can be costly, primarily when using state-of-the-art equipment that may not be readily available \cite{Jin17}. For this reason, researchers often rely on simulations of physical systems to test their designs and discover new opportunities for improvement at a low cost. The simulation models proposed by researchers in \cite{Joyo20,Yaqub20} are of particular interest. In these simulations, traffic lights controllers use a smart mobile carrier antenna to communicate with the drivers' cellphones to determine the number of vehicles and their speed at a given intersection, improving the capabilities of previously installed traffic lights by adapting their timing based on traffic conditions. In addition, simulation models test different traffic light timing configurations and create animations that illustrate how the model behaves \cite{Kamran17}. 
Traffic flow modeling is essential to design conditions that provide drivers and pedestrians with a fast and enjoyable commute by solving congestion problems and producing more efficient traffic light systems \cite{Wee13}. Multiple papers in the literature provide traffic flow simulations using statistical methods or dedicated open-source urban mobility simulation frameworks such as Eclipse SUMO, MATSim, NETSim and PTV Vissim \cite{Gartner92, Salimifard13}. An Arena 10 software simulation is proposed in \cite{Salimifard13} by solving a general traffic flow problem, treating the model as a discrete queuing system. In \cite{Gartner92}, a statistical method for evaluating alternative control strategies using NETSim is proposed using a pseudo-random number of cars. In both studies, simulation relies on certain conditions that may not be met at all times. The proposed methods also face challenges in practical implementation. Although most papers in the literature propose simulation models using open-source software with a steep learning curve, very few of them design their own simulation software from scratch. However, sometimes it is more convenient to develop software that allows for a quicker implementation without compromising model accuracy and performance. Creating a Python algorithm that simulates a microscopic traffic flow model with dynamic traffic lights based on traffic conditions can provide a reliable environment for basic congestion analysis using either a high-speed camera, a smart antenna, or a PIR sensor. This idea provides the fundamental premise of this research project.

\newpage
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Contributions and Outline}

This research project proposes the microscopic simulation of a traffic flow model with Dynamic Traffic Lights using Pygame. The traffic lights timing self-adapts based on the traffic density at an arbitrary intersection. This project also evaluates the performance of a smart antenna, high-speed cameras, and PIR sensors as distinct traffic management technologies in the simulated environment. \\
The contributions of this simulation are the following:
\begin{enumerate}
\item Creation of a microscopic traffic flow simulation in Python 3 using Pygame.
\item Creation of a GUI.
\item Creation of a simulated environment that randomly generates cars, bikes, trucks, and buses at an arbitrary intersection, allowing fundamental traffic flow analysis using three traffic management technologies.
\item Design of an algorithm for a Dynamic Traffic Lights Controller based on the traffic variations in the simulated traffic model, integrating a Smart Antenna, High-Speed Cameras, and PIR sensors and their respective logic.
\item Performance evaluation of a smart antenna, high-speed cameras, and PIR sensors as distinct traffic management technologies in the simulated environment.
\item Creation of tables and graphs demonstrating the simulation results in terms of the total number of cars served, the average waiting time at a red light, and efficiency for each technology.
\item Reduction of daily commute time based on lower time waiting at an intersection and efficient traffic lights timing given the instantaneous traffic at the arbitrary intersection.
\end{enumerate}
This final report consists of 7 sections. The introduction presented above is section \ref{sec_intro}. Section \ref{sec_Methodology} provides a detailed explanation of the methods used to accomplish each simulation.
Section \ref{sec_results} presents the results obtained after running the three simulations. Section \ref{sec_Conclusion} presents the conclusions and analysis of the obtained data. Section \ref{sec_future} presents ideas about future work that can be done to improve the project. Section \ref{sec_references} lists the references of this project. Finally, section \ref{sec_codes} contains a QR code to access the Github and python codes for this project. \newpage %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Methodology} \label{sec_Methodology} This section details the methods, programming logic, and assumptions used to create the simulation environment and implement each traffic management technology. The simulations developed in this project were written in Python programming language with the help of pygame as the main library. Pygame provides a set of modules that allow the creation of 2D video games integrating audio and graphics. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection{Graphical User Interface} \label{subsec_gui} To provide a simple method of interacting with the simulations, a graphical user interface (GUI) was created. This GUI allows the user to access each simulation individually and plot the final data from a secondary menu. The final version of the GUI is provided below: \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/GUI_1} \caption{Main page of the graphical user interface.} \label{fig:gui1} \end{figure} After pressing the "Menu" button in figure \ref{fig:gui1}, the user is taken to a secondary page as shown in figure \ref{fig:gui2}, where each simulation can be launched in any order, and the available data can be plotted. \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/GUI_2} \caption{Secondary page of the graphical user interface.} \label{fig:gui2} \end{figure} \newpage %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection{Antenna Simulation} \label{subsec_antenna} This module simulates the Dynamic Traffic Lights with a smart antenna covering the intersection. The fundamental assumption for the smart antenna technology is to provide a highly efficient method to dynamically control traffic lights by exploiting the capabilities of the telecommunications antennas currently in use by mobile carriers. It determines the number of cars present in each direction of the intersection by summing the number of vehicles East to West and North to South that collide with each lane of the intersection. The green light timing of each traffic light ranges from 60 to 180 seconds depending on the number of cars present. The antenna will verify the presence of vehicles in either direction every 60 seconds. If there are cars present in the opposite direction, the light will switch. In case there is no traffic in either direction the green lights will stay on for 180 seconds to then switch to the opposite direction, so on and so forth. \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/Antenna} \caption{Antenna Simulation Window} \label{fig:antenna} \end{figure} \subsubsection{Python Code Explanation} \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/a1} \caption{} \label{fig:a1} \end{figure} As shown in figure \ref{fig:a1}, This class makes rectangles (referred to as Lasers, which represent the antenna's operating range in each direction) used for collision detection. 
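A stripped-down sketch of such a class is shown next. It is a reconstruction of the idea behind figure \ref{fig:a1} rather than the exact code in the screenshot, so the names, positions, and sizes used here are placeholders.

\begin{verbatim}
import pygame

class Laser(pygame.sprite.Sprite):
    """Invisible rectangle used only for collision detection in one lane."""
    def __init__(self, x, y, width, height):
        super().__init__()
        self.image = pygame.Surface((width, height))     # surface placed at (x, y)
        self.rect = self.image.get_rect(topleft=(x, y))

# One Laser per lane, collected in a sprite group (coordinates are placeholders)
laser_group = pygame.sprite.Group(
    Laser(0, 400, 300, 40),      # left lane
    Laser(1100, 400, 300, 40),   # right lane
    Laser(560, 0, 40, 300),      # top lane
    Laser(620, 500, 40, 300),    # bottom lane
)
\end{verbatim}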
We give the surface x,y coordinates to place it on the screen. Since we have four lanes, we make four laser-type objects and add them to the sprite group called "laser group." CarDidComeOnLeft, CarDidComeOnRight, CarDidComeOnTop and CarDidComeOnBottom are Booleans used to detect if a car did come onto a particular lane. They are initially false and set to true if vehicles collide with the corresponding laser or rectangle via the pygame.sprite.spritecollide function. We then reset the booleans above in case a vehicle is not on the laser to clear the carDidCome variables. We check for collisions with the pygame.sprite.spritecollide function and if cars are present the variables are set true. \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/a2} \caption{} \label{fig:a2} \end{figure} We keep a count of total cars in each lane with the above variables. Total cars in each direction (North to South and East to West) are calculated by adding their respective lanes. \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/a3} \caption{} \label{fig:a3} \end{figure} When cars are on their respective lane which is detected by the lasers, we add that car into the total variable of that lane. \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/a4} \caption{} \label{fig:a4} \end{figure} We use the ternary operator along with carDidCome variables to change the detection font. We then go onto detection and switching logic for which we employ our f, cycle, carsDidCome and state variables to check the current state. State variable is used to check if we are either on the x-axis (0 – 2) lane or y-axis (1 - 3) lane. \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/a5} \caption{} \label{fig:a5} \end{figure} \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/a6} \caption{} \label{fig:a6} \end{figure} \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/a9} \caption{} \label{fig:a9} \end{figure} As shown in figures \ref{fig:a5}, \ref{fig:a6} and \ref{fig:a9}, if the variable "state" is 0, we are at the x-axis serving lanes 0 and 2. Vehicles move in a lane until at least the greenMinTime has passed, and if cars are present in the opposite lane, we stop executing and go to the opposite lane and vice versa until greenMaxTime has passed. We then continue to execute until an entire cycle has finished for the axis. A cycle is finished when there are three executions in that axis. We chose three executions since our greenMaxTime is 180 and greenMinTime is 60; hence: Cycles = 180/60 = 3 We check which lane will be executed next in the next axis when execution is finished. In this case, the axis will be y since our state was 0. Since we want the lane with more cars to execute, we check the length of the currentCars variable, then the one with the largest value will be our next lane to execute, adjusting the cycles and state flags appropriately. Once we are executing the other axis and its three executions are over, we will check which lane in the other axis (x-axis) has more cars, and that will be our next lane to execute and so on.\\ \textbf{Traffic Lights' Switching Logic}:\\ We have two axes in our simulation, x, and y. The two comprise lanes 0, 2 for the x-axis and lanes 1,3 for the y-axis. We need to stay in one direction until greenMaxTime has run out. However, if other cars are in the opposing lane and greenMinTime has run out, we swap onto the other lane. 
Then we must wait until greenMinTime has passed to check if cars are present in the lane, and if so, we switch back to the previous lane and execute it until greenMinTime and greenMaxTime run out. We then have to go to the other axis and find out which lane has more cars. The lane with more cars will be our current lane, and we shall continue executing as before, waiting for Tmax to run out and if Tmin has passed and there are cars on the opposite lane, we will switch to that lane. \newpage %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection{Camera Simulation} \label{subsec_camera} This module simulates the Dynamic Traffic Lights with a high speed camera in each direction. The fundamental assumption for the high speed camera technology is to provide a highly efficient image processing method to dynamically control traffic lights by detecting the presence of vehicles at the intersection. In a real-time IoT project the cameras would be connected to a image processing module that detects the vehicles using computer vision. However, for this simulation, only the expected behavior of the traffic lights after image processing was considered. This module assumes that the cameras are sending their live feed to a image processing unit, and the traffic lights' timing self-adjust based on the processing done by this unit. The green light timing of each traffic light changes dynamically by providing a longer greentime to the direction that has the most vehicles. The signals are activated in a clock-wise direction. \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/Camera} \caption{} \label{fig:camera} \end{figure} \subsubsection{Python Code Explanation} \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/c3} \caption{} \label{fig:c3} \end{figure} As shown in figure \ref{fig:c3} above, the setTime() function defines the method used to determine the timing of the green lights based on the number of vehicles present at the intersection after being detected by the camera. First the algorithm counts the number of vehicles in the next green direction to allocate time according to the traffic density. Then it counts the number of vehicles for each direction based on vehicle class to determine how many vehicles of each class are detected by the camera at any given time. The green lights timing is calculated by using the following formula, using the ceiling function. The ceiling function f(x) takes a real number x as an input and returns the least integer greater than or equal to x: \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/f1} \caption{Formula to calculate greenTime of camera} \label{fig:f1} \end{figure} \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/c4} \caption{} \label{fig:c4} \end{figure} \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/c1} \caption{} \label{fig:c1} \end{figure} As shown in figure \ref{fig:c1} above, we iterate over each of the signals and check if the one we are currently on is green. If it is, we check if its yellow time is 1 (which would mean it will switch to red in a second) and if so, we blit STOP as the signal text. If not, we blit the current yellow time. If the current signal is not green, we check if its red timer is less than 10 seconds. If it is, we then check if the time is precisely 0, which would denote this is the signal that must be the next green signal and blit GO as the text. 
If the red timer's time is not less than or equal to 10, we blit the current red timer. \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/c2} \caption{} \label{fig:c2} \end{figure} We blit our texts and timers following the light switching logic, including our simulation time and vehicle crossed count. We then blit the camera images, moving each vehicle in our simulation sprite group called "move."\\ \textbf{Selection Logic}:\\ We start with signal [0] as our initial signal and the subsequent signals are calculated with the nextGreen variable which calculates by: \begin{equation}\label{key} (currentGreen+1) \, \% \, noOfSignals \end{equation} \newpage %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection{PIR Simulation} \label{subsec_pir} This module simulates the Dynamic Traffic Lights with PIR Sensors at the intersection. The fundamental assumption for the PIR Sensor technology is to provide a method of detecting the presence of cars based on movement in each direction of the intersection. In this simulation, the PIR sensors are represented by "rays" that detect the presence of vehicles when they "collide" with the rays within the sensors' operating range. The PIR sensor logic is implemented following a "master-slave" module where East is lane 0, West is lane 2, North is lane 1 and South is lane 3. With this assumption the sensors check every greenmax if vehicles are present in the opposite direction. \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/PIRsensor} \caption{} \label{fig:pirsensor} \end{figure} \subsubsection{Python Code Explanation} \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/p1} \caption{} \label{fig:p1} \end{figure} As shown in figure \ref{fig:p1}, This class makes rectangles (referred to as Lasers, which represent the antenna's operating range in each direction) used for collision detection. We give the surface x,y coordinates to place it on the screen. Since we have four lanes, we make four laser-type objects and add them to the sprite group called "laser group." CarDidComeOnLeft, CarDidComeOnRight, CarDidComeOnTop and CarDidComeOnBottom are Booleans used to detect if a car did come onto a particular lane. They are initially false and set to true if vehicles collide with the corresponding laser or rectangle via the pygame.sprite.spritecollide function. \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/p2} \caption{} \label{fig:p2} \end{figure} Figure \ref{fig:p2} illustrates that leftServiced, rightServiced, topServiced, and bottomServiced are Booleans which determine if a lane has been serviced. A lane is serviced when the current lane and the lane opposite to it have finished execution. For example, if the current lane is left (0) and finishes execution for at least the greenMinTime the control gets transferred to the opposite lane right (2). When (2) has been serviced for at least the greenMinTime, the left lane is said to be serviced. The f variables are used to lockout a lane for executing again. If the variable is false, that particular lane won’t be executable \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/p3} \caption{} \label{fig:p3} \end{figure} We then reset the booleans above in case a vehicle is not on the laser to clear the carDidCome variables. We check for collisions with the pygame.sprite.spritecollide function and if cars are present the variables are set true. 
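In outline, that reset-and-detect step can be sketched as follows, continuing the Laser sketch from the antenna section; the laser and vehicle group names here are assumptions, and the actual code is shown in figure \ref{fig:p3}.

\begin{verbatim}
def update_detection_flags(lasers, vehicle_group):
    """Recompute the carDidCome flags from fresh collisions.

    lasers        : dict mapping "left", "right", "top", "bottom" to Laser sprites
    vehicle_group : pygame.sprite.Group holding all active vehicles
    """
    flags = {}
    for side, laser in lasers.items():
        # spritecollide returns the vehicles currently overlapping this laser
        hits = pygame.sprite.spritecollide(laser, vehicle_group, False)
        flags[side] = len(hits) > 0   # True only while a vehicle is on the laser
    return flags
\end{verbatim}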
\begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/p4} \caption{} \label{fig:p4} \end{figure} Using the ternary operator and carDidCome variables if cars are present, we show Yes and if not, we show No as the text in Cars present. This process emulates the detection of movement sensed by the PIR sensors. \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/p5} \caption{} \label{fig:p5} \end{figure} \begin{figure}[H] \centering \includegraphics[width=\linewidth]{images/p6} \caption{} \label{fig:p6} \end{figure} As shown in figures \ref{fig:p5} and \ref{fig:p6}, we check every lane with its own block, which looks like the above. This block is used to check lane 0. Each lane gets greenMaxTime as its maximum green time. However, if there are vehicles in the opposing lane (initially lane 2), and greenMinTime has passed, we will stop executing the direction of current green. The opposing lane will then proceed to execute for greenMaxTime, or if vehicles are present in the opposite lane it will execute for greenMaxTime only. Once the opposite lane has been serviced, the lane is said to be serviced. In this case, the lane serviced will be lane 0. We will then check lane one and repeat the above process until we have serviced all lanes, at which point we will reset all flags so we can start executing from lane 0 again.\\ \textbf{PiR Switching Logic}:\\ Our main idea is to service a lane for at least greenMinTime if there are no cars in the opposite lane. To achieve this, we need to monitor the lane whose turn it is and the current executing lane. The current executing lane is the one that needs to be serviced. When the current lane and its opposing lane are executed, they are both considered "served." The current lane is the one that is currently executing (currently green). We then choose a lane (0 in this case) as an example to explain the concept. First, we will let lane (0) execute for at least the greenMinTime; if greenMinTime finishes and there are no cars in the opposite lane until the time reaches 0, we will say lane 0 has been serviced and move onto the next lane in turn. However, if greenMinTime is finished and there are cars in the opposing lane, we will stop executing the current lane (0) and make the current lane equal to the opposite lane (lane two). When lane two's execution time reaches greenMinTime, we will detect if cars are in the opposite lane(lane 0). If there are cars in lane 0, we will stop executing lane two and say lane 0 is serviced, move onto the next lane in turn (1), and so on. If there are no cars in lane 0, we will keep executing lane two until the time runs out and move onto the next lane in turn. \newpage %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Results and Data Analysis} \label{sec_results} This section presents the results obtained after running the three simulations for 1 hour (3600 seconds). In this project, a traffic flow simulated environment that randomly generates cars, bikes, trucks, and buses at an arbitrary intersection was created. Allowing fundamental traffic flow analysis using three traffic management technologies. The performance of these three technologies was compared based on the total number of cars served and the average waiting time at red lights. 
\begin{figure}[H]
\centering
\includegraphics[width=\linewidth]{images/VehiclesServed_vs_Traffictechnology}
\caption{Vehicles served based on traffic management technology.}
\label{fig:vehiclesservedvstraffictechnology}
\end{figure}

Figure \ref{fig:vehiclesservedvstraffictechnology} shows the total number of cars served at the intersection during 1 hour of runtime, organized by traffic management technology. As shown above, the PIR sensor served 3,328 vehicles, the High Speed Camera served 3,828 vehicles, and the Smart Antenna served 4,267 vehicles.

\begin{figure}[H]
\centering
\includegraphics[width=\linewidth]{images/AWT_vs_TrafficTechnology}
\caption{Average waiting time at a red light based on traffic management technology.}
\label{fig:awtvstraffictechnology}
\end{figure}

Figure \ref{fig:awtvstraffictechnology} compares the average waiting time at a red light experienced by the vehicles generated in the simulation environment.

\begin{figure}[H]
\centering
\includegraphics[width=\linewidth]{images/CarsServed_vs_Servicedirection}
\caption{Vehicles served by the Smart Antenna based on service direction.}
\label{fig:carsservedvsservicedirection}
\end{figure}

Figure \ref{fig:carsservedvsservicedirection} shows the number of vehicles served by the Smart Antenna based on North to South and East to West directions.\\

\textbf{Data Analysis}

We ran the three simulations for 1 hour (3600 seconds) using the developed Python algorithms. In the simulations the traffic lights change dynamically depending on the number of cars present at the intersection, using the logic explained in section \ref{sec_Methodology} for each technology.

\begin{figure}[H]
\centering
\includegraphics[width=\linewidth]{images/Tables}
\caption{Tables comparing AWT, cars served and efficiency of each technology.}
\label{fig:tables}
\end{figure}

In the simulation results shown in figure \ref{fig:tables} above, we can observe that in the Smart Antenna simulation vehicles experienced an average waiting time of 115.833 seconds and 4,267 vehicles were served; in the High Speed Camera simulation vehicles experienced an average waiting time of 158.467 seconds and 3,828 vehicles were served; and in the PIR sensor simulation vehicles experienced an average waiting time of 195.767 seconds and 3,328 vehicles were served. From the obtained data we can determine the following:
\begin{itemize}
\item The \textbf{Smart Antenna is more efficient at reducing congestion at the intersection} than the High Speed Camera and the PIR Sensor.
\item The \textbf{Smart Antenna is 27.22\%} more efficient at reducing average waiting time than the High Speed Camera, and \textbf{41.03\%} more efficient than the PIR Sensor.
\item The \textbf{Smart Antenna served 11.47\% more vehicles than the High Speed Camera}, and \textbf{28.22\% more vehicles than the PIR sensor}.
\end{itemize}

\newpage
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Conclusion}
\label{sec_Conclusion}

After completing this research project, a microscopic simulation of a traffic flow model with Dynamic Traffic Lights using Pygame was developed. In this simulated environment the traffic lights timing self-adapts based on the traffic density at an arbitrary intersection. The performance of a smart antenna, high-speed cameras, and PIR sensors as distinct traffic management technologies in the simulated environment was compared to determine which technology is more efficient at reducing congestion. Although real world traffic scenarios exhibit a high level of complexity, our simulations prove to be a reliable method of basic traffic flow modeling.
After running the three simulations for 1 hour, we observed that the Smart Antenna is more efficient at reducing congestion at the intersection than the High Speed Camera and the PIR Sensor. We also observed that the Smart Antenna is 27.22\% more efficient at reducing average waiting time than the High Speed camera, and 41.03\% more efficient than the PIR Sensor. And that also the Smart Antenna served 11.47\% more vehicles than the High Speed Camera, and 28.22\% more vehicles than the PIR sensor. As demonstrated by the obtained results, the Smart Antenna is the superior traffic management technology. \newpage %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Future Work} \label{sec_future} \begin{itemize} \item {\large Implement the Traffic Lights Controller in Real-Time using Computer Vision} \begin{itemize} \item Use Computer Vision algorithms to process the video image of the High Speed Camera and determine the real-time presence of vehicles and pedestrians at the intersections \cite{Rachmadi11}. \item Use NVIDIA Jetson Nano or Jetson AGX Xavier developer kits as the Computer Vision processing unit. \item Build a functional traffic light for real-time testing. \end{itemize} \end{itemize} \begin{itemize} \item {\large Create a Physical Traffic Lights Controller with PIR Sensor using IoT Embedded Board} \begin{itemize} \item Create a physical Traffic Lights Controller by programming an open-source dedicated IoT Embedded Board (FPGA, Arduino, Raspberry Pi, etc). \item Create a model of an intersection and place a PIR sensor at the corners to sense vehicles' motion. \item Optimize Traffic Lights Timing based on PIR sensor readings \cite{Zachariah17}. \end{itemize} \end{itemize} \newpage %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{References} \label{sec_references} \begin{singlespace} % use single-line spacing for multi-line text within a single reference \setlength\bibitemsep{\baselineskip} %manually set separation between items in bibliography to double space \printbibliography[heading=none] \end{singlespace} \newpage %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Appendix: Python Codes} \label{sec_codes} \subsection{Github} To access this project on Github scan the QR Code below: \begin{figure}[H] \centering \includegraphics[width=.9\linewidth]{images/qr-code} https://github.com/marcostfermin/Dynamic-Traffic-Lights-Simulation \label{fig:qr-code} \end{figure} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \clearpage \end{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % This is a slightly modified template of the one built by % Steven V. Miller. Information can be found here: % http://svmiller.com/blog/2016/02/svm-r-markdown-manuscript/ % % I added the use of raggedright to the anonymous option % because journals, the ability to put all the footnotes % in endnotes, and the ability to manually adjust % the starting page from the YAML header. % % Here are the options that you can define in the YAML % header. % % fontfamily - self-explanatory % fontsize - self-explanatory (e.g. 10pt, 11pt) % anonymous - true/false. If true, names will be supressed and the % text will be double-spaced and ragged % right. For submission. % endnotes - true/false. If true, the footnotes will be put in a % section at the end just ahead of the references. % keywords - self-explanatory % thanks - shows up as a footnote to the title on page 1 % abstract - self explanatory % appendix - if true, tables and figures will have in % front % appendixletter - The letter to append to tables and figures in % appendix % pagenumber - Put in a number here to get a starting page number % besides 1. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \documentclass[11pt,]{article} \usepackage[left=1in,top=1in,right=1in,bottom=1in]{geometry} \usepackage{amsmath} \usepackage{float} \usepackage{dcolumn} \newcommand*{\authorfont}{\fontfamily{phv}\selectfont} \usepackage[]{mathpazo} \usepackage[T1]{fontenc} \usepackage[utf8]{inputenc} \usepackage{abstract} \renewcommand{\abstractname}{} % clear the title \renewcommand{\absnamepos}{empty} % originally center \providecommand{\tightlist}{% \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} \renewenvironment{abstract} {{% \setlength{\leftmargin}{0mm} \setlength{\rightmargin}{\leftmargin}% }% \relax} {\endlist} \makeatletter \def\@maketitle{% \newpage % \null % \vskip 2em% % \begin{center}% \let \footnote \thanks {\fontsize{18}{20}\selectfont\raggedright \setlength{\parindent}{0pt} \@title \par}% } %\fi \makeatother \setcounter{secnumdepth}{0} \usepackage{graphicx} % We will generate all images so they have a width \maxwidth. This means % that they will get their normal width if they fit onto the page, but % are scaled down if they would overflow the margins. \makeatletter \def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth \else\Gin@nat@width\fi} \makeatother \let\Oldincludegraphics\includegraphics \renewcommand{\includegraphics}[1]{\Oldincludegraphics[width=\maxwidth]{#1}} \title{Are Women Actually Better Doctors? The Relationship Between Female Physicians and Maternal Mortality Ratios \thanks{Thanks to Dr.~Aaron Gullickson for putting up with my complete lack of knowledge in both statistics and R. Your patience is appreciated. 
I will not miss feeling like a complete idiot every Tuesday and Thursday.} } \author{\Large Olivia Atkinson\vspace{0.05in} \newline\normalsize\emph{University of Oregon, Political Science} } \date{} \usepackage{titlesec} \titleformat*{\section}{\normalsize\bfseries} \titleformat*{\subsection}{\normalsize\itshape} \titleformat*{\subsubsection}{\normalsize\itshape} \titleformat*{\paragraph}{\normalsize\itshape} \titleformat*{\subparagraph}{\normalsize\itshape} \usepackage{natbib} \bibliographystyle{./resources/ajs.bst} %\renewcommand{\refname}{References} %\makeatletter %\renewcommand\bibsection{ % \section*{{\normalsize{\refname}}}% %}% %\makeatother \newtheorem{hypothesis}{Hypothesis} \usepackage{setspace} \makeatletter \@ifpackageloaded{hyperref}{}{% \ifxetex \usepackage[setpagesize=false, % page size defined by xetex unicode=false, % unicode breaks when used with xetex xetex]{hyperref} \else \usepackage[unicode=true]{hyperref} \fi } \@ifpackageloaded{color}{ \PassOptionsToPackage{usenames,dvipsnames}{color} }{% \usepackage[usenames,dvipsnames]{color} } \makeatother \hypersetup{breaklinks=true, bookmarks=true, pdfauthor={Olivia Atkinson (University of Oregon, Political Science)}, pdfkeywords = {maternal mortality, female physicans, gender norms and behavior}, pdftitle={Are Women Actually Better Doctors? The Relationship Between Female Physicians and Maternal Mortality Ratios}, colorlinks=true, citecolor=blue, urlcolor=blue, linkcolor=magenta, pdfborder={0 0 0}} \urlstyle{same} % don't use monospace font for urls \usepackage{endnotes} \newlength{\normalparindent} \setlength{\normalparindent}{\parindent} %prettier captions for figures and tables %I am making the text of figure captions smaller but not table captions \usepackage[labelfont=bf,labelsep=period]{caption} \captionsetup[figure]{font=footnotesize} \begin{document} % \pagenumbering{arabic}% resets `page` counter to 1 % \setcounter{page}{1} % \maketitle {% \usefont{T1}{pnc}{m}{n} \setlength{\parindent}{0pt} \thispagestyle{plain} {\fontsize{18}{20}\selectfont\raggedright \maketitle % title \par } { \vskip 13.5pt\relax \normalsize\fontsize{11}{12} \textbf{\authorfont Olivia Atkinson} \hskip 15pt \emph{\small University of Oregon, Political Science} } } \vskip 6.5pt \noindent \section{Introduction}\label{introduction} Maternal mortality rates in the United States are among some of the highest in developed countries. In other developed countries maternal mortality rates have been steadily falling, while rates in the U.S. are continuing to rise. This seems especially troubling for a country like the United States where access to care and treatment is and should be easy, hypothetically. However, while the U.S. as a whole has access to some of the best medical resources in the world, only a certain portion of the population are able to access those resources and the care that they need. Additionally, maternal mortality rates are hard to calculate and accurately record. Although the rising rates of maternal mortality are caused by a myriad of reasons ranging from the increasing age of mothers to more structural issues like a broken and complicated healthcare system it should not deter reserachers from conducting research that can better uncover the reasons (even if they are multiple) leading to the alarming rise of maternal deaths. 
In fact, the lack of data and the difficulty of conducting such research should be a motivating factor for other researchers and scholars who are invested in finding underlying causes of maternal deaths and suggesting policy solutions to help alleviate them.

Another trend that has been on the rise in the United States is the percentage of female physicians. Today, the majority of physicians under the age of 35 are women (M. Johnson 2018). With the number of female physicians on the rise, I am interested in looking at the relationship between maternal mortality ratios and the percentage of patients who see female doctors. The main question this paper seeks to answer is: Does seeing a female physician decrease a woman's chances of dying from pregnancy related causes? Because of gendered norms and behavior, women are thought to be and generally are more caring, attentive, and emotionally available compared to their male counterparts, who are seen as aggressive, unwelcoming, and less communicative. This translates into a number of areas of social life, and this paper is interested in seeing whether it also transfers into the medical profession. There have been a number of studies that look at the relationship between patient care and physician gender (C. Y. Johnson 2016; Berthold et al. 2008; Tsugawa et al. 2017). However, none of these studies have looked specifically at a physician's gender and its relationship to maternal mortality. As such, this project hopes to fill a gap in the literature and also encourage further studies that look at this specific relationship.

\section{Data and Methods}\label{data-and-methods}

Data on maternal mortality and natality were collected from the CDC WONDER database. WONDER is an acronym for Wide-ranging Online Data for Epidemiologic Research, and the database provides public health data and information to the public. The maternal mortality ratio was calculated by dividing the total number of maternal deaths in a census region by the total number of births in that census region for the years 2000-2016. The data for ``Detailed Mortality - Underlying Cause of Death'' are based on death certificates for U.S. residents. The detailed mortality data is compiled from data provided by the 57 vital statistics jurisdictions through the Vital Statistics Cooperative. The data set is produced by the U.S. Department of Health and Human Services, Centers for Disease Control and Prevention, National Center for Health Statistics, and the Division of Vital Statistics, Mortality Statistics Branch (\url{https://wonder.cdc.gov/DataSets.html}). The natality data is the number of live births occurring within a given year. The natality data is divided up into three databases (1995-2002, 2003-2006, and 2007-2017) because of changes in reporting standards regarding race in 2003.

Data for physician gender was taken from the IPUMS Health Surveys in the Medical Expenditure Panel Survey. The MEPS is a survey conducted through five rounds of interviews over a period of roughly two years. The data for physician gender reports whether the individual's usual source of care provider is male or female. To be asked the question about their physician's gender, survey respondents had to be eligible for the Access to Care supplement. To be eligible, individuals had to be current, non-institutionalized members of the responding unit in round 2 for panel members in relative year 1 and round 4 for panel members in relative year 2.
In the maternal mortality dataset I created a subset of the region, year, and deaths variables. In the natality dataset I created a subset of the region, year, and births variables. I then created the maternal mortality ratio by first merging the maternal deaths data with the births data and then dividing deaths by births. For the data on physician gender, I dropped all responses that were male, unknown, or coded ``not in universe''. From there, I subset the female physician data by year and region. Finally, I merged the maternal mortality ratio data with the percent female doctor data.

The data for maternal deaths were only available at the census region level. Thus, the results and any analysis drawn from the results are very crude. Also, the data on an individual's physician's gender was self-reported and only certain people were eligible to answer the question. However, this still provides a good starting point for further research on this topic. The data is also limited because no other independent variables, such as age or income, were controlled for. However, I felt it was unnecessary to include these variables since I was working at such an abstracted level (i.e.~the census region). If this project were conducted at the state level, other variables such as the ones previously listed would be useful to include in the analysis.

I used a two-way fixed effects OLS regression analysis that used within fixed effects for census region and year. This allowed me to rule out spuriousness from the time-constant covariates of percent female and maternal mortality. Because both of these trends have been steadily rising over the years, it initially seemed as though as the number of female physicians increased so did the maternal mortality ratio, disproving my hypothesis. However, once I put in dummy variables for year, the relationship between female physicians and maternal mortality changed to show a negative association between the percentage of female physicians and maternal mortality rates. In other words, as the percentage of female physicians went up, the maternal mortality ratio decreased. I also included a dummy variable for region to soak up any within-region differences that might be accounting for a change in maternal mortality rates, to get a better look at the relationship between physician gender and maternal mortality rates.

The first figure is a bivariate graph that shows the relationship between the percentage of female physicians and maternal mortality rates without accounting for the simultaneous increase over the years. I multiplied the maternal mortality ratio by 100,000 in the first and second graph because the numbers were small, making the graph more easily understandable. The second and third figures separate out the maternal mortality ratio and the percentage of female physicians by year to show that each is increasing. The models I ran are OLS two-way within fixed effects regressions, reported in the regression table. The only model of particular importance for this study is Model 4, which is the two-way fixed effects model with dummy variables for both region and year.

\section{Results}\label{results}

Figure 1 is a bivariate graph that shows the maternal mortality ratio by percent female physicians. As you can see, in every census region, as the percent of female doctors increases so does the maternal mortality ratio. This is exactly opposite of the expected results based on priors and outside literature.
Figure 2 and Figure 3 are both bivariate graphs showing that maternal mortality and the percent of female doctors have been increasing over the years. Because both maternal mortality rates and the percentage of female doctors have been rising, looking at the relationship on its own makes it seem as though an increase in the percent of female doctors correlates with an increase in maternal deaths. This relationship is shown in the regression table as well. Model 2 shows the maternal mortality ratio by percent female doctor. For every one percent increase in female doctors there is a .2 percent increase in the maternal mortality ratio, on average.

However, as Model 4 shows, when year and region are fitted with dummy variables the relationship between the percentage of female physicians and the maternal mortality ratio flips. The maternal mortality ratio actually decreases as the percentage of female physicians increases. However, this relationship is not statistically significant. On average, for every one percent increase in female physicians there is a 1.6 percent decrease in the maternal mortality ratio. While this does not seem that substantial, as mentioned above in the data section, this data is at the census region level so any analysis or results will be somewhat crudely generalizable. Although the R-squared values are large, this is a fixed effects model, so the R-squared values are not very informative because they absorb all of the time-constant variation.

Looking at the data by region, it becomes clear that there are major disparities between regions, especially the South and the West. As shown in Figure 2, the West has the lowest number of maternal deaths and the South has the most. The Midwest and Northeast are relatively similar. In Figure 3, the South has the lowest percentage of female physicians whereas the West has the highest. Again, the Midwest and the Northeast are relatively similar in the middle.
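To make the specification behind Model 4 concrete, it can be written as an ordinary least squares regression of the (logged) maternal mortality ratio on the percentage of female physicians plus region and year dummies. The original analysis was run in R (the \texttt{as.factor(year)} terms in the table below reflect that); the sketch below is a rough Python equivalent, where the file name, column names, and the logged outcome are assumptions rather than the actual replication code.

\begin{verbatim}
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

# One row per census region x year, with columns:
#   mmr (maternal deaths / births), percent_female, region, year
df = pd.read_csv("mmr_by_region_year.csv")   # hypothetical file name
df["log_mmr"] = np.log(df["mmr"])            # outcome scale assumed from the table

# Model 4: two-way fixed effects via region and year dummy variables
model4 = smf.ols("log_mmr ~ percent_female + C(region) + C(year)",
                 data=df).fit()
print(model4.summary())
\end{verbatim}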
\begin{figure}
\centering
\includegraphics{main_files/figure-latex/name1-1.pdf}
\caption{Maternal mortality ratio by percent female physicians}
\end{figure}

\begin{figure}
\centering
\includegraphics{main_files/figure-latex/name2-1.pdf}
\caption{Maternal mortality ratio by year}
\end{figure}

\begin{figure}
\centering
\includegraphics{main_files/figure-latex/name3-1.pdf}
\caption{Percent female physicians by year}
\end{figure}

\begin{table}
\caption{Regression Model Predicting Maternal Mortality Ratios by Percent Female Doctors}
\begin{center}
\begin{tabular}{l c c c c }
\hline
 & Model 1 & Model 2 & Model 3 & Model 4 \\
\hline
(Intercept)         & $-8.441^{***}$ & $-8.383^{***}$ & $-9.394^{***}$ & $-7.960^{***}$ \\
                    & $(0.054)$      & $(0.557)$      & $(0.263)$      & $(0.469)$      \\
regionNE            & $-0.037$       &                & $-0.044$       & $-0.033$       \\
                    & $(0.048)$      &                & $(0.057)$      & $(0.048)$      \\
regionS             & $0.190^{***}$  &                & $0.279^{***}$  & $0.147^{*}$    \\
                    & $(0.048)$      &                & $(0.061)$      & $(0.063)$      \\
regionW             & $-0.392^{***}$ &                & $-0.414^{***}$ & $-0.381^{***}$ \\
                    & $(0.048)$      &                & $(0.058)$      & $(0.049)$      \\
as.factor(year)2011 & $0.119$        &                &                & $0.155^{*}$    \\
                    & $(0.064)$      &                &                & $(0.073)$      \\
as.factor(year)2012 & $0.147^{*}$    &                &                & $0.207^{*}$    \\
                    & $(0.064)$      &                &                & $(0.086)$      \\
as.factor(year)2013 & $0.283^{***}$  &                &                & $0.370^{**}$   \\
                    & $(0.064)$      &                &                & $(0.106)$      \\
as.factor(year)2014 & $0.264^{***}$  &                &                & $0.362^{**}$   \\
                    & $(0.064)$      &                &                & $(0.114)$      \\
as.factor(year)2015 & $0.245^{**}$   &                &                & $0.354^{*}$    \\
                    & $(0.064)$      &                &                & $(0.124)$      \\
as.factor(year)2016 & $0.355^{***}$  &                &                & $0.471^{**}$   \\
                    & $(0.064)$      &                &                & $(0.129)$      \\
percent\_female     &                & $0.002$        & $0.033^{***}$  & $-0.016$       \\
                    &                & $(0.016)$      & $(0.007)$      & $(0.015)$      \\
\hline
R$^2$               & 0.915          & 0.001          & 0.847          & 0.920          \\
Adj. R$^2$          & 0.873          & -0.038         & 0.820          & 0.873          \\
Num. obs.           & 28             & 28             & 28             & 28             \\
RMSE                & 0.090          & 0.258          & 0.107          & 0.090          \\
\hline
\multicolumn{5}{l}{\scriptsize{$^{***}p<0.001$, $^{**}p<0.01$, $^*p<0.05$}}
\end{tabular}
\label{table:coefficients}
\end{center}
\end{table}

\section{Conclusions}\label{conclusions}

In conclusion, this project looked at how the percentage of female physicians affected the maternal mortality rate. The data suggest that the percentage of female physicians has an effect on maternal mortality ratios. While these results were not statistically significant, I hypothesize that better data at a smaller unit of analysis will show similar but stronger results. As such, further research should be conducted regarding this issue. As stated previously, the results drawn from this study are crude at best. However, it does offer a starting point for other, more in-depth research on the relationship between maternal mortality ratios and the gender of a patient's physician.

This obviously should not discourage men from practicing medicine. However, it should highlight how ingrained gender ideas, norms, and behaviors can have real-world effects. Male physicians should be more aware of how they interact with patients: they should take more care to listen to their patients, ask them how they feel about the care they are getting, discuss care and treatment options openly, and be more open to engaging in conversation and welcoming questions and comments from patients throughout the process. On a larger scale, this points to how harmful gendered norms and behavior can be, not only for individual people but for those they interact with, too. Working to dismantle these stereotypes and expectations from a young age will have large-scale effects in the long run.

Any studies on maternal mortality and its underlying causes should be taken seriously, as maternal mortality in the United States is increasing. The health and wellness of women is something that, as we have seen recently, is not taken seriously by certain governmental bodies. Further, more research should be conducted to understand regional differences in maternal mortality and what can be done to address their causes. Much of the South is rural, and many women there do not have immediate access to the pre- and post-natal care that they need.

\section{References}\label{references}

Berthold, H. K., Gouni-Berthold, I., Bestehorn, K. P., Böhm, M. and Krone, W. (2008). Physician gender is associated with the quality of type 2 diabetes care. Journal of Internal Medicine, 264: 340--350. \url{doi:10.1111/j.1365-2796.2008.01967.x}

Johnson, Carolyn Y. Women really are better doctors, study suggests. December 19, 2016. \url{https://www.washingtonpost.com/news/wonk/wp/2016/12/19/women-really-are-better-doctors-study-suggests/} (accessed May 30, 2019).

Johnson, Megan. The healthcare future is female. February 14, 2018. \url{https://www.athenahealth.com/insight/healthcare-future-female} (accessed June 10, 2019).

Tsugawa, Y., Jena, A. B., Figueroa, J. F., Orav, E. J., Blumenthal, D. M. and Jha, A. K. Comparison of Hospital Mortality and Readmission Rates for Medicare Patients Treated by Male vs Female Physicians. JAMA Intern Med. 2017;177(2):206--213. \url{doi:10.1001/jamainternmed.2016.7875}

\bibliography{../project.bib}

\end{document}
{ "alphanum_fraction": 0.7181933564, "avg_line_length": 45.698, "ext": "tex", "hexsha": "e6f0e1d6aa1399d7389bc76feac2207fd757a9d6", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "95afb2f4ce9badba34bb824bc05d915fa34ed39d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "oatkinsonr/Soc_513_Final_Project", "max_forks_repo_path": "products/paper/main.tex", "max_issues_count": 8, "max_issues_repo_head_hexsha": "95afb2f4ce9badba34bb824bc05d915fa34ed39d", "max_issues_repo_issues_event_max_datetime": "2021-02-09T17:20:18.000Z", "max_issues_repo_issues_event_min_datetime": "2019-05-09T20:17:19.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "oatkinsonr/Soc_513_Final_Project", "max_issues_repo_path": "products/paper/main.tex", "max_line_length": 548, "max_stars_count": 1, "max_stars_repo_head_hexsha": "95afb2f4ce9badba34bb824bc05d915fa34ed39d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "oatkinsonr/Soc_513_Final_Project", "max_stars_repo_path": "products/paper/main.tex", "max_stars_repo_stars_event_max_datetime": "2019-04-24T18:11:08.000Z", "max_stars_repo_stars_event_min_datetime": "2019-04-24T18:11:08.000Z", "num_tokens": 5727, "size": 22849 }
\documentclass[a4paper]{ltxdoc}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Packages
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{amsmath}
\usepackage[margin=3cm]{geometry}
\usepackage{calc}
\usepackage{tikz}
\usetikzlibrary{shadows,fit}
% \usetikzlibrary fails because file is not in current directory, lazy to setup TEXINPUTS
\makeatletter
\input{../tikzlibraryzx-calculus.code.tex}
\makeatother
% Loads the great package that produces tikz-like manual (see also tikzcd for examples)
\input{pgfmanual-en-macros.tex} % Is supposed to be included in recent TeX distributions, but I get errors...
\usepackage{makeidx} % Produces an index of commands.
\makeindex % Useful or not index will be created
\usepackage{alertmessage} % For warning, info...
\usepackage[hidelinks]{hyperref}
\newcommand{\mylink}[2]{\href{#1}{#2}\footnote{\url{#1}}}
\usepackage{verbatim}
\usepackage{cleveref}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Documentation
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
%%% Title: thanks tikzcd for the styling
\begin{center}
  \vspace*{1em}
  % Thanks tikzcd
  \tikz\node[scale=1.2]{%
    \color{gray}\Huge\ttfamily
    \char`\{\raisebox{.09em}{\textcolor{red!75!black}{zx\raisebox{-0.1em}{-}calculus}}\char`\}};
  \vspace{0.5em}
  {\Large\bfseries ZX-calculus with \tikzname}
  \vspace{1em}
  {Léo Colisson \quad Version 2022/02/09}\\[3mm]
  {\href{https://github.com/leo-colisson/zx-calculus}{\texttt{github.com/leo-colisson/zx-calculus}}}
\end{center}

\tableofcontents

\section{Introduction}

This library (based on the great \tikzname{} and \tikzname-cd packages) allows you to typeset ZX-calculus directly in \LaTeX{}. It comes with a default---but highly customizable---style:
\begin{codeexample}[]
\begin{ZX}
\zxZ{\alpha} \arrow[r] & \zxFracX-{\pi}{4}
\end{ZX}
\end{codeexample}
Even if this has not yet been tested a lot, you can also use a ``phase in label'' style, without really changing the code:
\begin{codeexample}[]
\begin{ZX}[phase in label right]
\zxZ{\alpha} \arrow[d] \\
\zxFracX-{\pi}{4}
\end{ZX}
\end{codeexample}

The goal is to provide an alternative to the great |tikzit| package: we wanted a solution that does not require the creation of an additional file or the use of external software, and which automatically adapts the width of columns and rows depending on the content of the nodes (in |tikzit| one needs to manually tune the position of each node, especially when dealing with large nodes). Our library also provides a default style and tries to separate the content from the style: that way it should be easy to globally change the styling of a given project without redesigning all diagrams. However, it should be fairly easy to combine tikzit and this library: when some diagrams are easier to design in tikzit, it should be possible to directly load the style of this library inside tikzit.

This library is quite young, so feel free to propose improvements or report issues on \href{https://github.com/leo-colisson/zx-calculus/issues}{\texttt{github.com/leo-colisson/zx-calculus/issues}}. We will of course try to maintain backward compatibility as much as possible, but we can't guarantee at 100\% that small changes (spacing, wire looks\dots{}) won't be made later. In case you want a completely unalterable style, just copy the two files of this library into your project forever (see installation)!

\section{Installation}

If your TeX distribution is recent enough, you can directly insert in your file:
% verse indents stuff, index adds to the index of command at the end of the file, || is a shortcut of \verb||
\begin{verse}
  \index{zx@\protect\texttt{zx-calculus} package}%
  \index{Packages and files!zx-calculus@\protect\texttt{zx-calculus}}%
  |\usepackage{zx-calculus}|%
\end{verse}
or load \tikzname{} and then use:
\begin{verse}%
  \index{cd@\protect\texttt{zx-calculus} library}%
  \index{Libraries!cd@\protect\texttt{zx-calculus}}%
  |\usetikzlibrary{zx-calculus}|%
\end{verse}
If this library is not yet packaged into CTAN (which is very likely in 2021), you must first download \mylink{https://github.com/leo-colisson/zx-calculus/blob/main/tikzlibraryzx-calculus.code.tex}{\texttt{tikzlibraryzx-calculus.code.tex}} and \mylink{https://github.com/leo-colisson/zx-calculus/blob/main/zx-calculus.sty}{\texttt{zx-calculus.sty}} (right-click on ``Raw'' and ``Save link as'') and save them at the root of your project.

\section{Quickstart}

You can create a diagram either with |\zx[options]{matrix}|, |\zxAmp[options]{matrix}| or with:
\begin{verse}
  |\begin{ZX}[options]|\\
  | matrix|\\
  |\end{ZX}|
\end{verse}
The matrix is composed of rows separated by |\\| and columns separated by |&| (except in |\zxAmp| where columns are separated with |\&|).
% \alertwarning{
%   Can't use |...| or \verb|...| in this environment :'(
%   Due to \LaTeX{} restrictions, \texttt{\&} can sometimes cause some troubles. \texttt{\textbackslash{}zxAmp} is always guaranteed to work (just make sure to use \texttt{\textbackslash{}\&} instead of \texttt{\&}). See \cref{subsec:addDiagram} for more details.
% }
This matrix is basically a \tikzname{} matrix of nodes (even better, a |tikz-cd| matrix, so you can use all the machinery of |tikz-cd|), so cells can be created using \verb#|[tikz style]| content#. However, the user does not usually need to use this syntax since many nodes like |\zxZ{spider phase}| have been created for them (including |\zxN{}| which is an empty node):
\begin{codeexample}[width=0pt]
\begin{ZX}
\zxZ{} & \zxZ{\alpha} & \zxZ-{\alpha} & \zxZ{\alpha+\beta} & \zxFracZ{\pi}{2} & \zxFracZ-{\pi}{2}\\
\zxX{} & \zxX{\alpha} & \zxX-{\alpha} & \zxX{\alpha+\beta} & \zxFracX{\pi}{2} & \zxFracX-{\pi}{2}\\
\zxN{} & \zxH{}
\end{ZX}
\end{codeexample}
Note that if a node has no argument like |\zxN|, you should still end it like |\zxN{}| to make sure your code will be backward compatible and will behave correctly.

To link the nodes, you should use |\arrow[options]| (|\ar[options]| for short) at the end of a cell (you can put many arrows). The options can contain a direction, made of a string of |r| (for ``right''), |l| (for ``left''), |d| (for ``down''), |u| (for ``up'') letters. That way, |\ar[rrd]| would be an arrow going right, right, and down:
\begin{codeexample}[]
\begin{ZX}
\zxZ{} \ar[rrd] & \zxX{}\\
& & \zxX-{\alpha}
\end{ZX}
\end{codeexample}
See how the alignment of your matrix helps reading it: in emacs |M-x align| is your friend (or even better, if you are tired of selecting the lines to align, bind \mylink{https://tex.stackexchange.com/a/64566/116348}{this \texttt{align-environment} function} to some shortcuts, like |C-<tab>|, and you will just have to do a single key-press to align your matrix). You may also encounter some shortcuts, like |\rar| instead of |\ar[r]|. Since straight lines are boring, we created many styles that you can just add in the options.
For instance, a measured Bell-pair can be created using the |C| style (note also how the |*| argument forces the node to be tighter):
\begin{codeexample}[]
\begin{ZX}
\zxZ*{a \pi} \ar[d,C]\\
\zxZ*{b \pi}
\end{ZX}
\end{codeexample}
The name of the style usually tries to graphically represent the shape of the wire (here it looks like a |C|). We also introduce many other styles, like |N| for wires that arrive and leave at a wide angle (yeah, the |N| is the best letter I could find to fit that shape):
\begin{codeexample}[]
\begin{ZX}
\zxN{} & \zxZ{\beta}\\
\zxZ{\alpha} \ar[ru,N] \ar[rd,N] &\\
& \zxZ{\gamma}
\end{ZX}
\end{codeexample}
Or |s| for wires that arrive and leave at sharp angles (the |\zxN{}| is used because it seems that the first cell of a matrix can't be empty):
\begin{codeexample}[]
\begin{ZX}
\zxN{} & \zxZ{\beta}\\
\zxZ{\alpha} \ar[ru,s] \ar[rd,s] &\\
& \zxZ{\gamma}
\end{ZX}
\end{codeexample}
You then have different variations of a style depending on its shape and/or direction. For instance, if we want the arrival of the |N| wire to be flat, use |N-|:
\begin{codeexample}[]
\begin{ZX}
\zxZ{\alpha} \ar[rd,N-] \\
& \zxZ{\beta}
\end{ZX}
\end{codeexample}
Similarly |o'| is a style for wires that have the shape of the top part of a circle, and it comes with variations depending on the part of the circle that must be kept:
\begin{codeexample}[width=0pt]
\begin{ZX}
\zxZ{\alpha} \ar[r,o',green] \ar[r,o.,red] \ar[d,o-,blue] \ar[d,-o,purple] & \zxZ{\beta}\\
\zxZ{\beta}
\end{ZX}
\end{codeexample}
Note that the position of the embellishments (|'|, |-|, |.|\dots{}) tries to graphically represent the shape of the wire. That way |-o| means ``take the left part (position of |-|) of the circle |o|''. Applied to |C|, this gives:
\begin{codeexample}[]
\begin{ZX}
\zxX{} \ar[d,C] \ar[r,C'] & \zxZ{} \ar[d,C-]\\
\zxZ{} \ar[r,C.] & \zxX{}
\end{ZX}
\end{codeexample}
You also have styles which automatically add another node in between; for instance |H| adds a Hadamard node in the middle of the wire:
\begin{codeexample}[]
\begin{ZX}
\zxZ{\alpha} \ar[r,o',H] \ar[r,o.,H] &[\zxHCol] \zxZ{\beta}
\end{ZX}
\end{codeexample}
Note that we also used |&[\zxHCol]| instead of |&| (on the first line). This is useful to add an extra space between the columns to have a nicer look. The same applies to rows (see the |*Row| instead of |*Col|):
\begin{codeexample}[]
\begin{ZX}
\zxZ{\alpha} \ar[d,-o,Z] \ar[d,o-,X] \\[\zxSRow]
\zxX{\beta}
\end{ZX}
\end{codeexample}
The reason for this is that it is hard to always get exactly the right spacing by default (for instance here \tikzname{} has no idea that a |H| node will be inserted when it starts to build the diagram), and sometimes the spacing needs some adjustments. However, while you could manually tweak this space using something like |&[1mm]| (it adds |1mm| to the column space), it is better to use some pre-configured spaces that can be (re)configured document-wise to keep the spacing uniform. You could define your own spacing, but we already provide a list for the most important spacings. They all start with |zx|, then you find the type of space: |H| for Hadamard, |S| for spiders, |W| when you connect only |\zxNone| nodes (otherwise the diagram will be too compressed), |w| when one side of the row contains only |\zxNone|\dots{} and then you find |Col| (for column spacing) or |Row| (for row spacing).
For instance we can use the |\zxNone| node (|\zxN| for short) and the above spacing to obtain this:
\begin{codeexample}[]
\begin{ZX}
\zxN{} \rar &[\zxwCol] \zxH{} \rar &[\zxwCol] \zxN{}
\end{ZX}
\end{codeexample}
\noindent or that:
\begin{codeexample}[]
\begin{ZX}
\zxN{} \ar[d,C] \ar[dr,s] &[\zxWCol] \zxN{} \\[\zxWRow]
\zxN{} \ar[ru,s] & \zxN{} \\
\end{ZX}
\end{codeexample}
When writing equations, you may also want to change the baseline to properly align your diagrams on a given line, like that (|a=blabla| gives the alias name |blabla| to the node, and configures tools useful for debugging):
\begin{codeexample}[]
$\zx[math baseline=myZ]{ \zxX{}\\ \zxZ[a=myZ]{} } = \zx{\zxX{} & \zxZ{}}$
\end{codeexample}
We also provide easy methods like |phase in label right| to change the labelling of a node (per node, per picture, or document-wise) and move the phase into a label automatically:
\begin{codeexample}[]
\begin{ZX}[phase in label right]
\zxZ{\alpha} \arrow[d] \\
\zxFracX-{\pi}{4}
\end{ZX}
\end{codeexample}
Now you should know enough to start your first diagrams. The rest of the documentation will go through all the styles, customizations and features, including the ones needed to obtain:
\begin{codeexample}[width=3cm]
\begin{ZX}
\leftManyDots{n} \zxX{\alpha} \zxLoopAboveDots{} \middleManyDots{} \ar[r,o'={a=75}]
& \zxX{\beta} \zxLoopAboveDots{} \rightManyDots{m}
\end{ZX}
\end{codeexample}
\noindent You will also see some tricks (notably based on aliases) to create clear bigger diagrams, like this debug mode which turns
{
\begin{ZX}[zx row sep=1pt,
execute at begin picture={%
%%% Definition of long items (the goal is to have a small and readable matrix
% (warning: macro can't have numbers in TeX. Also, make sure not to use existing names)
\def\Zpifour{\zxFracZ[a=Zpi4]-{\pi}{4}}%
\def\mypitwo{\zxFracX[a=mypi2]{\pi}{2}}%
}
]
%%% Matrix: in emacs "M-x align" is practical to automatically format it. a is for 'alias'
& \zxN[a=n]{} & \zxZ[a=xmiddle]{} & & \zxN[a=out1]{} \\
\zxN[a=in1]{} & \Zpifour{} & \zxX[a=Xdown]{} & \mypitwo{} & \\
& & & & \zxN[a=out2]{} \\
\zxN[a=in2]{} & \zxX[a=X1]{} & \zxZ[a=toprightpi]{\pi} & & \zxN[a=out3]{}
%%% Arrows
% Column 1
\ar[from=in1,to=X1,s] \ar[from=in2,to=Zpi4,.>]
% Column 2
\ar[from=X1,to=xmiddle,N'] \ar[from=X1,to=toprightpi,H] \ar[from=Zpi4,to=n,C] \ar[from=n,to=xmiddle,wc] \ar[from=Zpi4,to=Xdown]
% Column 3
\ar[from=xmiddle,to=Xdown,C-] \ar[from=xmiddle,to=mypi2,)]
% Column 4
\ar[from=mypi2,to=toprightpi,('] \ar[from=mypi2,to=out1,<'] \ar[from=mypi2,to=out2,<.] \ar[from=Xdown,to=out3,<.]
\end{ZX} into %
{%
\def\zxDebugMode{}%%%%
\begin{ZX}[zx row sep=1pt,
execute at begin picture={%
%%% Definition of long items (the goal is to have a small and readable matrix
% (warning: macro can't have numbers in TeX. Also, make sure not to use existing names)
\def\Zpifour{\zxFracZ[a=Zpi4]-{\pi}{4}}%
\def\mypitwo{\zxFracX[a=mypi2]{\pi}{2}}%
}
]
%%% Matrix: in emacs "M-x align" is practical to automatically format it. a is for 'alias'
& \zxN[a=n]{} & \zxZ[a=xmiddle]{} & & \zxN[a=out1]{} \\
\zxN[a=in1]{} & \Zpifour{} & \zxX[a=Xdown]{} & \mypitwo{} & \\
& & & & \zxN[a=out2]{} \\
\zxN[a=in2]{} & \zxX[a=X1]{} & \zxZ[a=toprightpi]{\pi} & & \zxN[a=out3]{}
%%% Arrows
% Column 1
\ar[from=in1,to=X1,s] \ar[from=in2,to=Zpi4,.>]
% Column 2
\ar[from=X1,to=xmiddle,N'] \ar[from=X1,to=toprightpi,H] \ar[from=Zpi4,to=n,C] \ar[from=n,to=xmiddle,wc] \ar[from=Zpi4,to=Xdown]
% Column 3
\ar[from=xmiddle,to=Xdown,C-] \ar[from=xmiddle,to=mypi2,)]
% Column 4
\ar[from=mypi2,to=toprightpi,('] \ar[from=mypi2,to=out1,<'] \ar[from=mypi2,to=out2,<.] \ar[from=Xdown,to=out3,<.]
\end{ZX}
}
\ (of course it only helps during the construction).\\
You will also see how you can customize the styles, and how you can easily extend this library to get any custom diagram:
{\catcode`\|=12 % Ensures | is not anymore \verb|...|
\begin{codeexample}[width=0pt]
{ % \usetikzlibrary{shadows}
\tikzset{
  my bloc/.style={
    anchor=center, inner sep=2pt, inner xsep=.7em,
    minimum height=3em, draw, thick, fill=blue!10!white,
    double copy shadow={opacity=.5},tape,
  }
}
\zx{|[my bloc]| f \rar &[1mm] |[my bloc]| g \rar &[1mm] \zxZ{\alpha} \rar & \zxNone{}}
}
\end{codeexample}
}

If you have some questions, suggestions, or bugs, please report them on \texttt{\url{https://github.com/leo-colisson/zx-calculus/issues}}.

\textbf{Tips}: if you are unsure of the definition of a style in an example, just click on it; a link will point to its definition. Also, if your pdf viewer does not copy/paste these examples correctly, you can copy them from the source code of this documentation available \mylink{https://github.com/leo-colisson/zx-calculus/blob/main/doc/zx-calculus.tex}{here} (to find the example, just use the ``search'' function of your web browser).

\section{Usage}

\subsection{Add a diagram}\label{subsec:addDiagram}

\begin{pgfmanualentry}
  \extractcommand\zx\opt{\oarg{options}}\marg{your diagram}\@@
  \extractenvironement{ZX}\opt{\oarg{options}}\@@
  \extractcommand\zxAmp\opt{\oarg{options}}\marg{your diagram}\@@
  \pgfmanualbody
  You can create a new ZX-diagram either with a macro (quicker for inline diagrams) or with an environment. All these commands are mostly equivalent, except that in |\zxAmp| columns are separated with |\&| instead of |&| (this was useful before, as |&| was not usable in |align| or inside macros; now this should be fixed). The \meta{options} can be used to locally change the style of the diagram, using the same options as the |{tikz-cd}| environment (from the \mylink{https://www.ctan.org/pkg/tikz-cd}{\texttt{tikz-cd} package}). The \meta{your diagram} argument, or the content of the |{ZX}| environment, is a \tikzname{} matrix of nodes, exactly like in the |tikz-cd| package: rows are separated using |\\|, columns using |&| (except for |\zxAmp| where columns are separated using |\&|), and nodes are created using \verb#|[tikz style]| node content# or with shortcut commands presented later in this document (recommended). Wires can be added like in |tikz-cd| (see more below) using |\arrow| or |\ar|: we provide recommended styles later to quickly create different kinds of wires which can change with the configured style. Content is typeset in math mode by default, and diagrams can be included in any equation.
{\catcode`\|=12 % Ensures | is not anymore \verb|...|
% Do not indent not to put space in final code
\begin{codeexample}[]
Spider \zx{\zxZ{\alpha}}, equation
$\zx{\zxZ{}} = \zx{\zxX{}}$
% and custom diagram:
%
\begin{ZX}[red]
\zxZ{\beta} \arrow[r] & \zxZ{\alpha} \\
|[fill=pink,draw]| \gamma \arrow[ru,bend right]
\end{ZX}
\end{codeexample}
}
\end{pgfmanualentry}

\begin{stylekey}{/tikz/defaultEnv/amp}
  In a previous version (before 2022/02/09), it was not possible to use |&| inside macros and |align| due to \LaTeX{} limitations. However, we found a solution by re-scanning the tokens, so now no special care should be taken in align or macros. But in case you need to deal with an environment having troubles with |&|, either use the |ampersand replacement=\&| option (whose shortcut is |amp|) or |\zxAmp| (in any case, replace |&| with |\&|).
\begin{codeexample}[vbox]
An aligned equation:
\begin{align}
\zxAmp{\zxZ{} \arrow[r] \& \zxX{}} &=
\begin{ZX}[amp]
\zxX{} \arrow[r] \& \zxZ{}
\end{ZX}
\end{align}
This limitation does not apply anymore:
\begin{align}
\zx{\zxZ{} \arrow[r] & \zxX{}} &=
\begin{ZX}
\zxX{} \arrow[r] & \zxZ{}
\end{ZX}
\end{align}
even in macros:
{\setlength{\fboxsep}{0pt}
\fbox{\zx{\zxZ{} \rar & \zxX{}}}}
\end{codeexample}
\end{stylekey}

\subsection{Nodes}

The following commands are useful to create different kinds of nodes. Always add empty arguments like |\example{}| if none are already present; otherwise, if you type |\example|, we don't guarantee backward compatibility.

\begin{command}{\zxEmptyDiagram{}}
  Creates an empty diagram.
\begin{codeexample}[width=3cm]
\begin{ZX}
\zxEmptyDiagram{}
\end{ZX}
\end{codeexample}
\end{command}

\begin{pgfmanualentry}
  \extractcommand\zxNone\opt{+}\opt{-}\marg{text}\@@
  \extractcommand\zxN\opt{+}\opt{-}\marg{text}\@@
  \extractcommand\zxNL\@@
  \extractcommand\zxNR\@@
  \pgfmanualbody
  Adds an empty node with |\zxNone{}| (alias |\zxN{}|). The \verb#-|+# decorations are used to add a bit of horizontal (\verb#\zxNone-{}#), vertical (\verb#\zxNone|{}#) and both (\verb#\zxNone+{}#) spacing (I don't know how to add \verb#|# in the documentation of the function). |\zxNone| is just a coordinate (and therefore can't have any text inside, but when possible this node should be preferred over the other versions since it has really zero width), but |\zxNone-{}| and \verb#\zxNone|{}# are actually nodes with |inner sep=0| along one direction. For that reason, they still have a tiny height or width (impossible to remove as far as I know). If you don't want to get holes when connecting multiple wires to them, it is therefore necessary to use |\zxNone{}| or the |wire centered| style (alias |wc|) (if you are using the |IO| mode, see also the |between none| style). But anyway you should mostly use |\zxNone|. Moreover, you should also add the column and row spacings |&[\zxWCol]| and |\\[\zxWRow]| to avoid overly shrunk diagrams when only wires are involved.
\begin{codeexample}[width=3cm]
\begin{ZX}
\zxNone{} \ar[C,d] \ar[rd,s] &[\zxWCol] \zxNone{}\\[\zxWRow]
\zxNone{} \ar[ru,s] & \zxNone{}
\end{ZX}
\end{codeexample}
  Use |&[\zxwCol]| (on the first line) and/or |\\[\zxwRow]| when a single None node is connected to the wire to add appropriate spacing (this spacing can of course be redefined to your preferences):
\begin{codeexample}[]
Compare \begin{ZX}
\zxN{} \rar & \zxZ{} \rar & \zxN{}
\end{ZX} with \begin{ZX}
\zxN{} \rar &[\zxwCol] \zxZ{} \rar &[\zxwCol] \zxN{}
\end{ZX}
\end{codeexample}
  This kind of code is so common that there is an alias for it: |\zxNL| and |\zxNR| automatically add a |\zxN{}| node, configure the column space (for this reason don't add an additional |&|, and be aware that emacs won't align them properly; note also that the space will only be taken into account if it is on the first line) and add a straight arrow. The |L/R| part of the name just specifies whether the node is on the left or right of the diagram, so that the column and arrow are put on the correct side:
\begin{codeexample}[]
\zx{\zxNL \zxX{} \zxNR}
\end{codeexample}
  Note that these two aliases can be used without |{}|. But they are the only ones. The \verb!\zxN|{text}! and \verb!\zxN-{text}! may be useful to display some text:
{\catcode`\|=12 % Ensures | is not anymore \verb|...|
\begin{codeexample}[]
\begin{ZX}[content fixed baseline]
& \zxN|{\dots} \dar \\
\zxZ{\theta_i} \rar & \zxZ{} \dar \rar & \zxZ{-(\delta_i+\theta_i+r\pi)}\rar & \zxH{} \rar & \zxX{a\pi} \\
& \zxN|{\dots} \\
\end{ZX}
\end{codeexample}
}
  When the top left cell is empty, you may get an error at compilation, |Single ampersand used with wrong catcode| (this error should be fixed in latest releases) or |<symbol> allowed only in math mode| (not sure why). To solve this issue, you can add an empty node in the very first cell:
\begin{codeexample}[]
\begin{ZX}
\zxN{} &[\zxwCol] \zxN{} \ar[d]\\[\zxwRow]
\zxNone{} \rar & \zxZ{}
\end{ZX}
\end{codeexample}
\end{pgfmanualentry}

You may also get the error |Single ampersand used with wrong catcode| when |&| already has a different meaning, for instance in |align|; in that case you may change the |&| character into |\&| using |[ampersand replacement=\&]|. Note however that in recent versions ($\geq$ 2022/02/09) this should not happen anymore.
\begin{codeexample}[vbox]
\begin{align}
\begin{ZX}[ampersand replacement=\&]
\zxN{} \rar \&[\zxWCol] \zxN{}
\end{ZX}
&=
\begin{ZX}[ampersand replacement=\&]
\zxN{} \rar \&[\zxwCol] \zxZ{} \rar \&[\zxwCol] \zxN{}
\end{ZX}\\
&=
\begin{ZX}[ampersand replacement=\&]
\zxN{} \rar \&[\zxwCol] \zxX{} \rar \&[\zxwCol] \zxN{}
\end{ZX}
\end{align}
\end{codeexample}

\begin{command}{\zxNoneDouble\opt{+-}\marg{text}}
  Like |\zxNone|, but the spacing for \verb#+-|# is large enough to fake two lines in only one. Not extremely useful (or one needs to play with |start anchor=south,end anchor=north|).
{\catcode`\|=12 % Ensures | is not anymore \verb|...|
\begin{codeexample}[width=3cm]
\begin{ZX}
\zxNoneDouble|{} \ar[r,s,start anchor=north,end anchor=south]
\ar[r,s,start anchor=south,end anchor=north] &[\zxWCol] \zxNoneDouble|{}
\end{ZX}
\end{codeexample}
}
\end{command}

\begin{command}{\zxFracZ\opt{-}\marg{numerator}\opt{\oarg{numerator with parens}\oarg{denominator with parens}}\marg{denominator}}
  Adds a Z node with a fraction; use the minus decorator to add a small minus in front (a normal minus would be too big, but you can configure the symbol).
\begin{codeexample}[width=3cm]
\begin{ZX}
\zxFracZ{\pi}{2} & \zxFracZ-{\pi}{2}
\end{ZX}
\end{codeexample}
  The optional arguments are useful when the numerator or the denominator needs parens when written inline (in that case both optional arguments must be specified): this proves useful when using a style that writes the fraction inline, for instance the default style for labels:
\begin{codeexample}[]
Compare \begin{ZX}
\zxFracZ{a+b}[(a+b)][(c+d)]{c+d}
\end{ZX} with %
\begin{ZX}[phase in label right]
\zxFracZ{a+b}[(a+b)][(c+d)]{c+d}
\end{ZX}
\end{codeexample}
\end{command}

\begin{command}{\zxFracX\opt{-}\marg{numerator}\marg{denominator}}
  Adds an X node with a fraction.
\begin{codeexample}[width=3cm]
\begin{ZX}
\zxFracX{\pi}{2} & \zxFracX-{\pi}{2}
\end{ZX}
\end{codeexample}
\end{command}

\begin{command}{\zxZ\opt{\oarg{other styles}*-}\marg{text}}
  Adds a Z node. \meta{other styles} are optional \tikzname{} arguments (the same as the ones provided to |tikz-cd|). They should be used with care, and if possible moved to the style directly to keep a consistent look across the paper.
\begin{codeexample}[width=3cm]
\begin{ZX}
\zxZ{} & \zxZ{\alpha} & \zxZ{\alpha + \beta} & \zxZ[fill=blue!50!white,text=red]{(a \oplus b)\pi}
\end{ZX}
\end{codeexample}
  The optional |-| argument adds a minus sign (customizable, see |\zxMinusInShort|) in front of a very short expression and tries to keep a circular shape. This is recommended notably for single-letter expressions.
\begin{codeexample}[width=3cm]
Compare \zx{\zxZ{-\alpha}} with \zx{\zxZ-{\alpha}}.
Labels: \zx[pila]{\zxZ{-\alpha}} vs \zx[pila]{\zxZ-{\alpha}}.
\end{codeexample}
  The |*| optional argument forces a condensed style, no matter what the text inside is. This can be practical \emph{sometimes}:
\begin{codeexample}[width=3cm]
Compare \zx{\zxN{} \rar &[\zxwCol] \zxZ{a\pi}}
with \zx{\zxN{} \rar &[\zxwCol] \zxZ*{a\pi}}.
\end{codeexample}
  \noindent but you should use it as rarely as possible (otherwise, change the style directly). See that it does not always give nice results:
\begin{codeexample}[width=3cm]
Compare \zx{\zxZ{-\alpha} \rar & \zxZ{\alpha+\beta}}
with \zx{\zxZ*{-\alpha} \rar & \zxZ*{\alpha+\beta}}.
Labels: \zx[pila]{\zxZ{-\alpha} \rar & \zxZ{\alpha+\beta}}
vs \zx[pila]{\zxZ*{-\alpha} \rar & \zxZ*{\alpha+\beta}}.
\end{codeexample}
\end{command}

\begin{command}{\zxX\opt{\oarg{other styles}*-}\marg{text}}
  Adds an X node, with the same options as the Z node.
\begin{codeexample}[width=3cm]
\begin{ZX}
\zxX{} & \zxX{\alpha} & \zxX-{\alpha} & \zxX{\alpha + \beta} & \zxX[text=green]{(a \oplus b)\pi}
\end{ZX}
\end{codeexample}
\end{command}

\begin{command}{\zxH\opt{\oarg{other styles}}}
  Adds a Hadamard node. See also the |H| wire style.
\begin{codeexample}[width=3cm]
\begin{ZX}
\zxNone{} \rar & \zxH{} \rar & \zxNone{}
\end{ZX}
\end{codeexample}
\end{command}

\begin{command}{\leftManyDots\opt{\oarg{text scale}\oarg{dots scale}}\marg{text}}
  Shortcut to add dots and a text next to them. It automatically adds the new column; see more examples below. Internally, it uses |3 dots| to place the dots, and can be reproduced using the other nodes around. Note that this node automatically adds a new cell, so you should \emph{not} use |&|.
\begin{codeexample}[]
\begin{ZX}
\leftManyDots{n} \zxX{\alpha}
\end{ZX}
\end{codeexample}
\end{command}

\begin{command}{\rightManyDots\opt{\oarg{text scale}\oarg{dots scale}}\marg{text}}
  Shortcut to add dots and a text next to them. It automatically adds the new column; see more examples below.
\begin{codeexample}[width=3cm]
\begin{ZX}
\zxX{\alpha} \rightManyDots{m}
\end{ZX}
\end{codeexample}
\end{command}

\begin{command}{\middleManyDots{}}
  Shortcut to add dots and a text next to them; see more examples below.
\begin{codeexample}[width=3cm]
\begin{ZX}
\zxX{\alpha} \middleManyDots{} & \zxX{\beta}
\end{ZX}
\end{codeexample}
\end{command}

\begin{command}{\zxLoop\opt{\oarg{direction angle}\oarg{opening angle}\oarg{other styles}}}
  Adds a loop in direction \meta{direction angle} (defaults to $90$), with opening angle \meta{opening angle} (defaults to $20$).
\begin{codeexample}[width=3cm]
\begin{ZX}
\zxX{\alpha} \zxLoop{} & \zxX{} \zxLoop[45]{} & \zxX{} \zxLoop[0][30][red]{}
\end{ZX}
\end{codeexample}
\end{command}

\begin{command}{\zxLoopAboveDots\opt{\oarg{opening angle}\oarg{other styles}}}
  Adds a loop above the node with some dots.
\begin{codeexample}[width=3cm]
\begin{ZX}
\zxX{\alpha} \zxLoopAboveDots{}
\end{ZX}
\end{codeexample}
\end{command}

\noindent The previous commands can be useful to create this figure:
\begin{codeexample}[width=0pt]% Forces code/example on two lines.
\begin{ZX}
\leftManyDots{n} \zxX{\alpha} \zxLoopAboveDots{} \middleManyDots{} \ar[r,o'={a=75}]
& \zxX{\beta} \zxLoopAboveDots{} \rightManyDots{m}
\end{ZX}
\end{codeexample}

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/styles/rounded style/content vertically centered\@nil%
  \extractkey/zx/styles/rounded style/content fixed baseline\@nil%
  \extractkey/zx/styles/rounded style preload/content vertically centered\@nil%
  \extractkey/zx/styles/rounded style preload/content fixed baseline\@nil%
  \extractkey/zx/styles/rounded style preload/content fixed also frac\@nil%
  \makeatother
  \pgfmanualbody
  By default, the content of the nodes is vertically centered. This is nice because it leaves as much space as possible around the text, but when using several nodes whose letters have different heights or depths, the baselines of the nodes won't be aligned (this is particularly visible on nodes with very high text, like |b'|):
\begin{codeexample}[width=0pt]
\begin{ZX}
\zxX[a=start]{\alpha} & \zxX{\beta} & \zxX{a} & \zxX{b} & \zxX*{a\pi} & \zxX*{b\pi} & \zxX*{b'\pi} & \zxZ*{'b\pi} & \zxZ{(a \oplus b )\pi} & \zxFracX-{\pi}{4} & \zxFracZ{\pi}{4} & \zxZ-{\delta} & \zxZ[a=end]-{\gamma} \ar[from=start,to=end,on layer=background]
\end{ZX}
\end{codeexample}
  Using |content fixed baseline|, it is however possible to fix the height and depth of the text to make sure the baselines are aligned (|content vertically centered| is used to come back to the default behavior). When used as a ZX option, it avoids setting this on fractions since it renders poorly. Use |content fixed baseline also frac| if you want to fix the baseline of all fractions as well (this last style is useful only as a ZX option, since |content fixed baseline| works on all nodes).
\begin{codeexample}[width=0pt]
\begin{ZX}[content fixed baseline]
\zxX[a=start]{\alpha} & \zxX{\beta} & \zxX{a} & \zxX{b} & \zxX*{a\pi} & \zxX*{b\pi} & \zxX*{b'\pi} & \zxZ*{'b\pi} & \zxZ{(a \oplus b )\pi} & \zxFracX-{\pi}{4} & \zxFracZ{\pi}{4} & \zxZ-{\delta} & \zxZ[a=end]-{\gamma} \ar[from=start,to=end,on layer=background]
\end{ZX}
\end{codeexample}
  Note however that the height is really hardcoded (not sure how to avoid that) and is quite small (otherwise nodes quickly become too large), so content that is too large may overflow the node (this is visible on the |'b\pi| node).
You can use this style either on a per-picture basis (it's what we just did), on a per-node basis (just use it in the options of the node), or globally: \begin{codeexample}[width=0pt] \tikzset{ /zx/user overlay/.style={ content fixed baseline, }, } \begin{ZX} \zxX[a=start]{\alpha} & \zxX{\beta} & \zxX{a} & \zxX{b} & \zxX*{a\pi} & \zxX*{b\pi} & \zxX*{b'\pi} & \zxZ*{'b\pi} & \zxZ{(a \oplus b )\pi} & \zxFracX-{\pi}{4} & \zxFracZ{\pi}{4} & \zxZ-{\delta} & \zxZ[a=end]-{\gamma} \ar[from=start,to=end,on layer=background] \end{ZX} \end{codeexample} It can also be practical to combine it with |small minus|: \begin{codeexample}[] \begin{ZX} \zxZ-{\delta_j} & \zxZ[content fixed baseline]-{\delta_j} & \zxZ[small minus]-{\delta_j} & \zxZ[content fixed baseline,small minus]-{\delta_j} \end{ZX} \end{codeexample} \end{pgfmanualentry} \subsection{Phase in label style} We also provide styles to place the phase on a label next to an empty node (not yet very well tested): \begin{pgfmanualentry} \makeatletter \def\extrakeytext{style, } \extractkey/zx/styles/rounded style/phase in content\@nil% \extractkey/zx/styles/rounded style/phase in label=style (default {})\@nil% \extractkey/zx/styles/rounded style/pil=style (default {})\@nil% \extractkey/zx/styles/rounded style/phase in label above=style (default {})\@nil% \extractkey/zx/styles/rounded style/pila=style (default {})\@nil% \extractkey/zx/styles/rounded style/phase in label below=style (default {})\@nil% \extractkey/zx/styles/rounded style/pilb=style (default {})\@nil% \extractkey/zx/styles/rounded style/phase in label right=style (default {})\@nil% \extractkey/zx/styles/rounded style/pilr=style (default {})\@nil% \extractkey/zx/styles/rounded style/phase in label left=style (default {})\@nil% \extractkey/zx/styles/rounded style/pill=style (default {})\@nil% \makeatother \pgfmanualbody The above styles are useful to place a spider phase in a label outside the node. They can either be put on the style of a node to modify a single node at a time: \begin{codeexample}[] \zx{\zxX[phase in label]{\alpha} \rar & \zxX{\alpha}} \end{codeexample} \noindent It can also be configured on a per-figure basis: \begin{codeexample}[] \zx[phase in label right]{ \zxZ{\alpha} \dar \\ \zxX{\alpha} \dar \\ \zxZ{}} \end{codeexample} \noindent or globally: \begin{codeexample}[] \tikzset{ /zx/user overlay/.style={ phase in label={label position=-45, text=purple,fill=none} } } \zx{ \zxFracX-{\pi}{2} } \end{codeexample} Note that we must use |user post preparation labels| and not |/zx/user overlay nodes| because this will be run after all the machinery for labels has been setup. While |phase in content| forces the content of the node to be inside the node instead of inside a label (which is the default behavior), all other styles are special cases of |phase in label|. The \meta{style} parameter can be any style made for a tikz label: \begin{codeexample}[width=3cm] \zx{ \zxX[phase in label={label position=45, text=purple}]{\alpha} } \end{codeexample} For ease of use, the special cases of label position |above|, |below|, |right| and |left| have their respective shortcut style. The |pil*| versions are shortcuts of the longer style written above. For instance, |pilb| stands for |phase in label below|. 
Note also that by default labels will take some space, but it's possible to make them overlay without taking space using the |overlay| label style\dots{} however do it at your own risk, as it can overlay the content around (also the text before and after):
\begin{codeexample}[width=0pt]
\zx{
\zxZ[pilb]{\alpha+\beta} \rar
& \zxX[pilb]{\gamma} \rar
& \zxZ[pilb=overlay]{\gamma+\eta}
}
\end{codeexample}
The above also works for fractions:
\begin{codeexample}[]
\zx{\zxFracX[pilr]-{\pi}{2}}
\end{codeexample}
For fractions, you can configure how you want the label text to be displayed, either on a single line (default) or on two lines, like in nodes. The function |\zxConvertToFracInLabel| is in charge of that conversion, and can be changed to your needs to change this option document-wise. To use the same notation in both content and labels, you can do:
\begin{codeexample}[width=0pt]
Compare \begin{ZX}[phase in label right]
\zxFracZ{\pi}{2} \dar \\
\zxFracZ{a+b}[(a+b)][(c+d)]{c+d}
\end{ZX} with
{\RenewExpandableDocumentCommand{\zxConvertToFracInLabel}{mmmmm}{
\zxConvertToFracInContent{#1}{#2}{#3}{#4}{#5}%
}
\begin{ZX}[phase in label right]
\zxFracZ{\pi}{2} \dar \\
\zxFracZ{a+b}[(a+b)][(c+d)]{c+d}
\end{ZX}
(exact same code!)
}
\end{codeexample}
Note that in |\zxFracZ{a+b}[(a+b)][(c+d)]{c+d}| the optional arguments are useful to put parens appropriately when the fraction is written inline.
\end{pgfmanualentry}

\begin{command}{\zxDebugMode{}}
  If this macro is defined, debug mode is active. See below how it can be useful.
\end{command}

\begin{stylekey}{/tikz/every node/a=alias}
  Shortcut to add an |alias| to a node; in debug mode it also displays the alias next to the node (very practical to quickly add wires, as we will see later). To enable debug mode, just type |\def\zxDebugMode{}| before your drawing, potentially in a group like |{\def\zxDebugMode{} your diagram...}| if you want to apply it to a single diagram. This will be very practical later when using names instead of directions to connect wires (this can improve readability and maintainability). This is added automatically to the |/tikz/every node| style. Note that debug mode is effective only for |a| and not |alias|.
\begin{codeexample}[width=3cm]
\begin{ZX}
\zxX[a=A]{} & \zxZ[a=B]{\beta} \ar[from=A,to=B]
\end{ZX}
{\def\zxDebugMode{} %% Enable debug mode for next diagram%
\begin{ZX}
\zxX[a=A]{} & \zxZ[a=B]{\beta} \ar[from=A,to=B]
\end{ZX}
}
\end{codeexample}
\end{stylekey}

\begin{stylekey}{/zx/defaultEnv/math baseline=node alias}
  You can easily change the baseline of a diagram, which defaults to:
  \begin{verse}
    |baseline={([yshift=-axis_height]current bounding box.center)}|
  \end{verse}
  (|axis_height| is the distance used to center equations on the ``mathematical axis'') by using this in the \meta{options} field of |\zx[options]{...}|. However, this can be a bit long to write, so |math baseline=yourAlias| is a shortcut for |baseline={([yshift=-axis_height]yourAlias)}|:
\begin{codeexample}[width=0pt]
Compare
$\begin{ZX}
\leftManyDots{n} \zxX{\alpha} \zxLoopAboveDots{} \middleManyDots{} \ar[r,o'={a=75}]
& \zxX{\beta} \zxLoopAboveDots{} \rightManyDots{m}
\end{ZX}
=
{\def\zxDefaultSoftAngleS{20} % useful to make the angle in \leftManyDots{} nicer.
\begin{ZX} \leftManyDots{n} \zxX{\alpha+\beta} \rightManyDots{m} \end{ZX}}$ with $\begin{ZX}[math baseline=wantedBaseline] \leftManyDots{n} \zxX{\alpha} \zxLoopAboveDots{} \middleManyDots{} \ar[r,o'={a=75}] %% See here --v the node chosen as the baseline & \zxX[a=wantedBaseline]{\beta} \zxLoopAboveDots{} \rightManyDots{m} \end{ZX} = {\def\zxDefaultSoftAngleS{20} % useful to make the angle in \leftManyDots{} nicer. \begin{ZX} \leftManyDots{n} \zxX{\alpha+\beta} \rightManyDots{m} \end{ZX}}$ \end{codeexample} Also, if you find your diagram a bit ``too high'', check that you did not forget to remove a trailing |\\| at the end of the last line: \begin{codeexample}[width=3cm] Compare $\begin{ZX} \zxZ{} \rar[o'] \rar[o.] & \zxX{}\\ \zxZ{} \rar[o'] \rar[o.] \rar & \zxX{}\\ %% <--- remove last \\ \end{ZX} = \zx{\zxEmptyDiagram}$ with $\begin{ZX} \zxZ{} \rar[o'] \rar[o.] & \zxX{}\\ \zxZ{} \rar[o'] \rar[o.] \rar & \zxX{} \end{ZX} = \zx{\zxEmptyDiagram}$ \end{codeexample} \end{stylekey} \subsection{Wires} \subsubsection{Creating wires and debug mode} \begin{pgfmanualentry} \extractcommand\arrow\opt{\oarg{options}}\@@ \extractcommand\ar\opt{\oarg{options}}\@@ \pgfmanualbody These synonym commands (actually coming from |tikz-cd|) are used to draw wires between nodes. We refer to |tikz-cd| for an in-depth documentation, but what is important for our purpose is that the direction of the wires can be specified in the \meta{options} using a string of letters |r| (right), |l| (left), |u| (up), |d| (down). It's also possible to specify a node alias as a source or destination as shown below. \begin{codeexample}[] \zx{\zxZ{} \ar[r] & \zxX{}} = \zx{\zxX{} \arrow[rd] \\ & \zxZ{}} \end{codeexample} \meta{options} can also be used to add any additional style, either custom ones, or the ones defined in this library (this is recommended since it can be easily changed document-wise by simply changing the style). Multiple wires can be added in the same cell. Other shortcuts provided in |tikz-cd| like |\rar|\dots{} can be used. {\catcode`\|=12 % Ensures | is not anymore \verb|...| \begin{codeexample}[width=0pt] \begin{ZX} \zxZ{\alpha} \arrow[d, C] % C = Bell-like wire \ar[r,H,o'] % o' = top part of circle % H adds Hadamard, combine with \zxHCol \ar[r,H,o.] &[\zxHCol] \zxZ{\gamma}\\ \zxZ{\beta} \rar & \zxX{} \ar[ld,red,"\circ" {marking,blue}] \ar[rd,s] \\ \zxFracX-{\pi}{4} & &\zxZ{} \end{ZX} \end{codeexample} } \end{pgfmanualentry} As explained in |tikz-cd|, there are further shortened forms: \begin{pgfmanualentry} \extractcommand\rar\opt{\oarg{options}}\@@ \extractcommand\lar\opt{\oarg{options}}\@@ \extractcommand\dar\opt{\oarg{options}}\@@ \extractcommand\uar\opt{\oarg{options}}\@@ \extractcommand\drar\opt{\oarg{options}}\@@ \extractcommand\urar\opt{\oarg{options}}\@@ \extractcommand\dlar\opt{\oarg{options}}\@@ \extractcommand\ular\opt{\oarg{options}}\@@ \pgfmanualbody \end{pgfmanualentry} The first one is equivalent to \begin{verse} |\arrow|{\oarg{options}}|{r}| \end{verse} and the other ones work analogously. Note that sometimes, it may be practical to properly organize big diagrams to increase readability. To that end, one surely wants to have a small and well indented matrix (emacs |M-x align-current| or |M-x align| (for selected lines) commands are very practical to indent matrices automatically). Unfortunately, adding wires inside the matrix can make the line really long and hard to read. Similarly, some nodes involving fractions or long expressions can also be quite long. 
It is however easy to increase readability (and maintainability) by moving the wires to the very end of the diagram, using |a| (like |alias|, but with a debug mode) to connect nodes and |\def| to create shortcuts. Putting long node definitions inside a macro with |\def| can also be useful to keep the items in the matrix small:
\begin{codeexample}[width=0pt]
\begin{ZX}[zx row sep=1pt,
execute at begin picture={%
%%% Definition of long items (the goal is to have a small and readable matrix
% (warning: macro can't have numbers in TeX. Also, make sure not to use existing names)
\def\Zpifour{\zxFracZ[a=Zpi4]-{\pi}{4}}%
\def\mypitwo{\zxFracX[a=mypi2]{\pi}{2}}%
}
]
%%% Matrix: in emacs "M-x align-current" is practical to automatically format it.
%%% a is for 'alias'... but also provides a debug mode, see below.
& & & & \zxZ[a=toprightpi]{\pi} \\
\zxN[a=in1]{} & \zxX[a=X1]{} & & & & \zxN[a=out1]{} \\
& & \zxZ[a=xmiddle]{} & \mypitwo{} & & \zxN[a=out2]{} \\
\zxN[a=in2]{} & \Zpifour{} & & \zxX[a=Xdown]{} & & \zxN[a=out3]{}
%%% Arrows
% Column 1
\ar[from=in1,to=X1] \ar[from=in2,to=Zpi4]
% Column 2
\ar[from=X1,to=xmiddle,(.] \ar[from=X1,to=toprightpi,<',H] \ar[from=Zpi4,to=xmiddle,('] \ar[from=Zpi4,to=Xdown,o.]
% Column 3
\ar[from=xmiddle,to=Xdown,s.] \ar[from=xmiddle,to=mypi2]
% Column 4
\ar[from=mypi2,to=toprightpi,('] \ar[from=mypi2,to=out1,<'] \ar[from=mypi2,to=out2] \ar[from=Xdown,to=out3]
\end{ZX}
\end{codeexample}
In that setting, it is often useful to enable the debug mode via |\def\zxDebugMode{}| as explained above to quickly visualize the alias given to each node (note that debug mode works with |a=| but not with |alias=|). For instance, it was easy to rewrite the above diagram by moving nodes in the matrix and arrows after checking their names on the produced pdf (NB: you can increase |column sep| and |row sep| temporarily to make the debug information more visible):
\begin{codeexample}[width=0pt]
{
\def\zxDebugMode{}%%%%
\begin{ZX}[zx row sep=1pt,
execute at begin picture={%
%%% Definition of long items (the goal is to have a small and readable matrix
% (warning: macro can't have numbers in TeX. Also, make sure not to use existing names)
\def\Zpifour{\zxFracZ[a=Zpi4]-{\pi}{4}}%
\def\mypitwo{\zxFracX[a=mypi2]{\pi}{2}}%
}
]
%%% Matrix: in emacs "M-x align" is practical to automatically format it. a is for 'alias'
& \zxN[a=n]{} & \zxZ[a=xmiddle]{} & & \zxN[a=out1]{} \\
\zxN[a=in1]{} & \Zpifour{} & \zxX[a=Xdown]{} & \mypitwo{} & \\
& & & & \zxN[a=out2]{} \\
\zxN[a=in2]{} & \zxX[a=X1]{} & \zxZ[a=toprightpi]{\pi} & & \zxN[a=out3]{}
%%% Arrows
% Column 1
\ar[from=in1,to=X1,s] \ar[from=in2,to=Zpi4,.>]
% Column 2
\ar[from=X1,to=xmiddle,N'] \ar[from=X1,to=toprightpi,H] \ar[from=Zpi4,to=n,C] \ar[from=n,to=xmiddle,wc] \ar[from=Zpi4,to=Xdown]
% Column 3
\ar[from=xmiddle,to=Xdown,C-] \ar[from=xmiddle,to=mypi2,)]
% Column 4
\ar[from=mypi2,to=toprightpi,('] \ar[from=mypi2,to=out1,<'] \ar[from=mypi2,to=out2,<.] \ar[from=Xdown,to=out3,<.]
\end{ZX}
}
\end{codeexample}

\subsubsection{Wire styles (new generation)}

We now give a list of the wire styles provided by this library (|/zx/wires definition/| is an automatically loaded style). We recommend using them instead of manual styling to ensure wires look the same document-wise, but they can of course be customized to your needs.
Note that the names of the styles are supposed (ahah, I do my best with what ASCII provides) to graphically represent the action of the style, and some characters are added to specify the shape: typically |'| means top, |.| bottom, |X-| is right to X (or should arrive with angle 0), |-X| is left to X (or should leave with angle zero). These shapes are usually designed to work when the starting node is left-most (or above, if both nodes are in the same column), but some of them may work both ways.

Note that the first version of this library (which appeared one week before this new version\dots{} hopefully backward compatibility won't be too much of a problem) was using |in=| and |out=| to create these styles. However, this turned out not to be very reliable since the shape of the wire changed (sometimes significantly) depending on the position of the nodes. This new version should be more reliable, but the older styles are still available by using |IO, nameOfWirestyle| (read more in \cref{subsub:IOwires}).

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/wires definition/C=radius ratio (default 1)\@nil%
  \extractkey/zx/wires definition/C.=radius ratio (default 1)\@nil%
  \extractkey/zx/wires definition/C'=radius ratio (default 1)\@nil%
  \extractkey/zx/wires definition/C-=radius ratio (default 1)\@nil%
  \makeatother
  \pgfmanualbody
  Bell-like wires with an arrival at a ``right angle''; |C| represents the shape of the wire, while |.| (bottom), |'| (top) and |-| (side) represent (visually) its position. Combine with |wire centered| (|wc|) to avoid holes when connecting multiple wires (not required with |\zxNone{}|, alias |\zxN{}|).
\begin{codeexample}[]
A Bell pair \zx{\zxNone{} \ar[d,C] \\[\zxWRow] \zxNone{}} ,
a swapped Bell pair \begin{ZX}
\zxN{} \ar[d,C] \ar[rd,s] &[\zxWCol] \zxN{} \\[\zxWRow]
\zxN{} \ar[ru,s] & \zxN{}
\end{ZX} and a funny graph \begin{ZX}
\zxX{} \ar[d,C] \ar[r,C'] & \zxZ{} \ar[d,C-]\\
\zxZ{} \ar[r,C.] & \zxX{}
\end{ZX}.
\end{codeexample}
  Note that this style actually connects the nodes using a perfect circle (it is \emph{not} based on |curve to|), and therefore should \emph{not} be used together with |in|, |out|, |looseness|\dots{} (this is also the case for most other styles except the ones in |IO|). It has the advantage of nicely connecting nodes which are not aligned or have different shapes:
\begin{codeexample}[]
\begin{ZX}
\zxX{\alpha} \ar[dr,C]\\
& \zxNone{}
\end{ZX}
\end{codeexample}
  The \meta{radius ratio} parameter can be used to turn the circle into an ellipse with this ratio between both axes:
\begin{codeexample}[]
\begin{ZX}
\zxX{\alpha} \ar[dr,C=0.5,red] \ar[dr,C,green] \ar[dr,C=2,blue] \ar[dr,C=3,purple]\\
& \zxNone{}
\end{ZX}
\begin{ZX}
\zxX{} \ar[d,C=2] \ar[r,C'=2] & \zxZ{} \ar[d,C-=2,H]\\
\zxZ{} \ar[r,C.=2] & \zxX{}
\end{ZX}.
\end{codeexample}
\end{pgfmanualentry}

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/wires definition/o'=angle (default 40)\@nil%
  \extractkey/zx/wires definition/o.=angle (default 40)\@nil%
  \extractkey/zx/wires definition/o-=angle (default 40)\@nil%
  \extractkey/zx/wires definition/-o=angle (default 40)\@nil%
  \makeatother
  \pgfmanualbody
  Curved wire, similar to |C| but with a softened angle (optionally specified via \meta{angle}, and globally editable with |\zxDefaultLineWidth|). Again, the symbols specify which part of the circle (represented with |o|) must be kept.
\begin{codeexample}[width=3cm]
\begin{ZX}
\zxX{} \ar[d,-o] \ar[d,o-]\\
\zxZ{} \ar[r,o'] \ar[r,o.]
& \zxX{}
\end{ZX}.
\end{codeexample}
  Note that these wires can be combined with |H|, |X| or |Z|; in that case one should use appropriate column and row spacing as explained in their documentation:
\begin{codeexample}[width=3cm]
\begin{ZX}
\zxX{\alpha} \ar[d,-o,H] \ar[d,o-,H]\\[\zxHRow]
\zxZ{\beta} \rar & \zxZ{} \ar[r,o',X] \ar[r,o.,Z] &[\zxSCol] \zxX{}
\end{ZX}.
\end{codeexample}
\end{pgfmanualentry}

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/wires definition/(=angle (default 30)\@nil%
  \extractkey/zx/wires definition/)=angle (default 30)\@nil%
  \extractkey/zx/wires definition/('=angle (default 30)\@nil%
  \extractkey/zx/wires definition/(.=angle (default 30)\@nil%
  \makeatother
  \pgfmanualbody
  Curved wire, similar to |o| but usable for diagonal items. The angle is, like in |bend right|, the opening angle from the line which links the two nodes. For the first two commands, the |(| and |)| symbols must be imagined as if the starting point was on top of the parens, and the ending point at the bottom.
\begin{codeexample}[width=3cm]
\begin{ZX}
\zxX{} \ar[rd,(] \ar[rd,),red]\\
& \zxZ{}
\end{ZX}.
\end{codeexample}
  Then, |('|=|(| and |(.|=|)|; this notation is, I think, more intuitive when linking nodes from left to right. |('| is used when going to the top right and |(.| when going to the bottom right.
\begin{codeexample}[width=3cm]
\begin{ZX}
\zxN{} & \zxX{}\\
\zxZ{} \ar[ru,('] \ar[rd,(.] & \\
& \zxX{}
\end{ZX}
\end{codeexample}
  When the nodes are too far apart, the default angle of |30| may produce strange results, as the wire will go above (for |('|) the vertical line. Either choose a smaller angle, or see |<'| instead. Note that for now this style is based on |in| and |out|, but it may change later. So if you want to change looseness, or really rely on the precise specified angle, prefer to use |IO,(| instead (which takes the |IO| version, guaranteed to stay untouched).
\end{pgfmanualentry}

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/wires definition/start fake center north\@nil%
  \extractkey/zx/wires definition/start fake center south\@nil%
  \extractkey/zx/wires definition/start fake center east\@nil%
  \extractkey/zx/wires definition/start fake center west\@nil%
  \extractkey/zx/wires definition/start real center\@nil
  \extractkey/zx/wires definition/end fake center north\@nil%
  \extractkey/zx/wires definition/end fake center south\@nil%
  \extractkey/zx/wires definition/end fake center east\@nil%
  \extractkey/zx/wires definition/end fake center west\@nil%
  \extractkey/zx/wires definition/end real center\@nil
  \extractkey/zx/wires definition/left to right\@nil%
  \extractkey/zx/wires definition/right to left\@nil%
  \extractkey/zx/wires definition/up to down\@nil%
  \extractkey/zx/wires definition/down to up\@nil%
  \extractkey/zx/wires definition/force left to right\@nil%
  \extractkey/zx/wires definition/force right to left\@nil%
  \extractkey/zx/wires definition/force up to down\@nil%
  \extractkey/zx/wires definition/force down to up\@nil%
  \extractkey/zx/wires definition/no fake center\@nil%
  \makeatother
  \pgfmanualbody
  Usually each wire should properly use these functions, so the end user should not need them too often (on a first reading, you can skip this paragraph). We added 4 anchors to nodes: |fake center north|, |fake center south|, |fake center east| and |fake center west|.
These anchors are used to determine the starting point of the wires depending on the direction of the wire (I tried to use more complex methods to ensure the wires would start on the boundary, but \mylink{https://tex.stackexchange.com/questions/619274}{they all fail}). Because some nodes may not have these anchors, we can't directly set |start anchor=fake center north, on layer=edgelayer| (but the user can do that if they are using only nodes with these anchors) or the code may fail on some nodes. For that reason, we check that these anchors exist while drawing our wires (which, at the best of my knowledge, can only be done while drawing the path). The |start/end fake center *| code is responsible to configure that properly (|start real center| will use the real center), and |left to right| (and similar) just configure both the |start| and |end| point to ensure the node starts at the appropriate anchor. However this won't work for style not defined in this library: in case you are sure that these anchors exists and want to use your own wire styles, you can then set the anchors manually and use |on layer=edgelayer|, or use |force left to right| (and similar) which will automatically do that for the |start| and |end| points. \begin{codeexample}[] \begin{ZX} \zxX{\alpha+\beta} \ar[r,o',no fake center] & \zxZ{\alpha+\beta}\\ \zxX{\alpha+\beta} \ar[r,o'] & \zxZ{\alpha+\beta} \end{ZX} \end{codeexample} \end{pgfmanualentry} \begin{pgfmanualentry} \makeatletter \def\extrakeytext{style, } \extractkey/zx/args/-andL/-=x\@nil% \extractkey/zx/args/-andL/1-=x\@nil% \extractkey/zx/args/-andL/2-=x\@nil% \extractkey/zx/args/-andL/L=y\@nil% \extractkey/zx/args/-andL/1L=y\@nil% \extractkey/zx/args/-andL/2L=y\@nil% \extractkey/zx/args/-andL/1 angle and length=\marg{angle}\marg{length}\@nil% \extractkey/zx/args/-andL/1al=\marg{angle}\marg{length}\@nil% \extractkey/zx/args/-andL/2 angle and length=\marg{angle}\marg{length}\@nil% \extractkey/zx/args/-andL/2al=\marg{angle}\marg{length}\@nil% \extractkey/zx/args/-andL/angle and length=\marg{angle}\marg{length}\@nil% \extractkey/zx/args/-andL/al=\marg{angle}\marg{length}\@nil% \extractkey/zx/args/-andL/1 angle=\marg{angle}\@nil% \extractkey/zx/args/-andL/1a=\marg{angle}\@nil% \extractkey/zx/args/-andL/2 angle=\marg{angle}\@nil% \extractkey/zx/args/-andL/1a=\marg{angle}\marg{length}\@nil% \extractkey/zx/args/-andL/angle=\marg{angle}\@nil% \extractkey/zx/args/-andL/a=\marg{angle}\@nil% \extractkey/zx/args/-andL/symmetry-L\@nil% \extractkey/zx/args/-andL/symmetry\@nil% \extractkey/zx/args/-andL/negate1L\@nil% \extractkey/zx/args/-andL/negate2L\@nil% \extractkey/zx/args/-andL/negateL\@nil% \extractkey/zx/args/-andL/negate1-\@nil% \extractkey/zx/args/-andL/negate2-\@nil% \extractkey/zx/args/-andL/negate-\@nil% \extractkey/zx/args/-andL/oneMinus1-\@nil% \extractkey/zx/args/-andL/oneMinus2-\@nil% \extractkey/zx/args/-andL/oneMinus1L\@nil% \extractkey/zx/args/-andL/oneMinus2L\@nil% \makeatother \pgfmanualbody The next wires can take multiple options. They are all based on the same set of options for now, namely |/zx/args/-andL/|. The |1*| options are used to configure the starting point, the |2*| to configure the ending point, if no number is given both points are updated. |-| and |L| are used to place two anchors of a Bezier curve. 
They are expressed as relative distances (so they are typically between $0$ and $1$, but can be pushed above $1$ or below $0$ for stronger effects): |-| is typically on the |x| axis and |L| on the |y| axis (the name represents ``graphically'' the direction). They are however not named |x| and |y| because some wires use them slightly differently, notably |o|, which uses |-| for the direction of the arrow and |L| for the direction perpendicular to the arrow (again, the shape of |L| represents a perpendicular line). Each wire interprets |-| and |L| so that $0$ leads to a straight line, and so that a correct shape is obtained when |1-| equals |2-|, |1L| equals |2L| (except for non-symmetric shapes of course), and both |-| and |L| are positive. The other expressions involving |angle| (or the shortcut |a|) allow you to define |1-|, |1L|\dots{} using a perhaps more intuitive ``polar'' notation, i.e.\ an ``angle'' and a relative length (if not specified, like in |1 angle|, the length defaults to $0.6$). Note that the angle is not really an angle (it is an angle only when the nodes are placed at $45$ degrees, or for the |bezier x/y| variations), but a ``squeezed angle'' (when nodes are not at $45$ degrees, the shape is squeezed horizontally or vertically so as not to change the wire), and similarly for the length. In the above list, the meaning of each expression should be clear from its name: for instance |1 angle and length={45}{.8}| will set up a squeezed angle of $45$ and a relative length of $.8$ for the first point, i.e.\ this is equivalent to $1-=.8\cos(45)$ and $1L=.8\sin(45)$, and |angle=45| will change the angle of both points to $45$, with a relative length of $.6$. In the above list, each long expression has below it a shorter version, for instance |a=45| is equivalent to |angle=45|. The last expressions (|symmetry-L|, |symmetry|\dots) are used internally to do some math. Of course, if you need to do symmetries at some point you can use these keys (|symmetry-L| exchanges |-| and |L|, and |symmetry| exchanges |1| and |2|); |negateX| just negates |X| and |oneMinusX| replaces |X| with |1-X|. Each of the following wires has default values which can be configured as explained in \cref{subsec:wirecustom}.
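As a purely illustrative sketch (using the |s.| wire documented just below; the values are arbitrary), the following compares the default shape with shapes configured through these ``polar'' keys:
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxX{\alpha} \ar[rd,s.] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[rd,s.={a=20}] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[rd,s.={1al={20}{.9},2al={10}{.5}}] \\
  & \zxZ{\beta}
\end{ZX}
\end{codeexample}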
\end{pgfmanualentry}

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/wires definition/s=-andL config (default defaultS)\@nil%
  \extractkey/zx/wires definition/s'=-andL config (default defaultS')\@nil%
  \extractkey/zx/wires definition/s.=-andL config (default defaultS')\@nil%
  \extractkey/zx/wires definition/-s=-andL config (default default-S)\@nil%
  \extractkey/zx/wires definition/-s'=-andL config (default \{defaultS',default-S\})\@nil%
  \extractkey/zx/wires definition/-s.=-andL config (default \{defaultS',default-S\})\@nil%
  \extractkey/zx/wires definition/s-=-andL config (default \{defaultS',default-S,symmetry\})\@nil%
  \extractkey/zx/wires definition/s'-=-andL config (default \{defaultS',default-S,symmetry\})\@nil%
  \extractkey/zx/wires definition/s.-=-andL config (default \{defaultS',default-S,symmetry\})\@nil%
  \extractkey/zx/wires definition/-S=-andL config (default \{defaultS',default-S\})\@nil%
  \extractkey/zx/wires definition/-S'=-andL config (default \{defaultS',default-S\})\@nil%
  \extractkey/zx/wires definition/-S.=-andL config (default \{defaultS',default-S\})\@nil%
  \extractkey/zx/wires definition/S-=-andL config (default \{defaultS',default-S,symmetry\})\@nil%
  \extractkey/zx/wires definition/S'-=-andL config (default \{defaultS',default-S,symmetry\})\@nil%
  \extractkey/zx/wires definition/S.-=-andL config (default \{defaultS',default-S,symmetry\})\@nil%
  \makeatother
  \pgfmanualbody
  |s| and |S| are used to create an s-like wire, to have nicer diagonal lines between nodes. The other versions are softened versions (the input and output angles are not as sharp). Adding |'| or |.| specifies whether the wire is going up-right or down-right; however, as of today it is mostly kept for backward compatibility since, for instance, |-s'| is the same as |-s| (but some styles may want to make a distinction later). The only exception is |s|/|s'|/|s.|: |s| has a sharper output angle than |s'| and |s.| (which are both equal).
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxX{\alpha} \ar[s,rd] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[s.,rd] \\
  & \zxZ{\beta}\\
  & \zxZ{\alpha}\\
  \zxX{\beta} \ar[S,ru] \\
  & \zxZ{\alpha}\\
  \zxX{\beta} \ar[s',ru] \\
\end{ZX}
\end{codeexample}
  |-| forces the angle on the side of |-| to be horizontal. Because for now the wires start inside the node, this is not very visible. For that reason, versions with a capital |S| have an anchor on the side of |-| lying on the surface of the node (|S| has two such anchors since both inputs and outputs arrive horizontally) instead of on the |fake center *| anchor (see the explanation on |fake center| anchors above).
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxX{\alpha} \ar[s.,rd] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[-s.,rd] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[s.-,rd] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[S,rd] \\
  & \zxZ{\beta}\\
\end{ZX}
\end{codeexample}
  It is possible to configure these wires using the options in |-andL config| as explained above (default values are given in \cref{subsec:wirecustom}), where |-| is the (relative) position of the horizontal Bezier anchor and |L| its relative vertical position (to keep an |s|-shape, you should have |-|$>$|L|).
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxX{\alpha} \ar[rd,s.]
\\
  & \zxZ{\beta}\\ % same as s., configure globally using defaultS'\\
  \zxX{\alpha} \ar[rd,s.={-=.8,L=.2}]\\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[rd,s.={L=.4}] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[rd,s.={L=0.1,-=1}] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[rd,edge above, control points visible,s.={-=2}] \\
  & \zxZ{\beta}
\end{ZX}
\end{codeexample}
  For the non-symmetric versions (involving a vertical arrival), you can configure each point separately using |1-| and |1L| (first point) and |2-| and |2L| (second point). You can also specify the ``squeezed angle'' and ``length'' of each point, for instance using the |1 angle and length={10}{.8}| option (the short version is |1al={10}{.8}|), or both at the same time using |al={10}{.6}| (this last command being itself equivalent to |a=10|). As explained later, |edge above| and |control points visible| can help you visualize the control points of the underlying Bezier curve.
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxZ{} \ar[dr,s.={al={10}{.8}}]\\
  &\zxZ{}\\
  \zxZ{} \ar[edge above,control points visible,dr,s.={a=10}]\\
  &\zxZ{}
\end{ZX}
\end{codeexample}
\end{pgfmanualentry}

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/wires definition/ss=-andL config (default \{defaultS,symmetry-L\})\@nil%
  \extractkey/zx/wires definition/SS=-andL config (default \{defaultS,symmetry-L\})\@nil%
  \extractkey/zx/wires definition/ss.=-andL config (default \{defaultS',symmetry-L\})\@nil%
  \extractkey/zx/wires definition/.ss=-andL config (default \{defaultS',symmetry-L\})\@nil%
  \extractkey/zx/wires definition/sIs.=-andL config (default defaultSIS)\@nil%
  \extractkey/zx/wires definition/.sIs=-andL config (default \{defaultS',defaultSIS\})\@nil%
  \extractkey/zx/wires definition/ss.I-=-andL config (default \{defaultS',defaultSIS,symmetry\})\@nil%
  \extractkey/zx/wires definition/I.ss-=-andL config (default \{defaultS',defaultSIS,symmetry\})\@nil%
  \extractkey/zx/wires definition/SIS=-andL config (default \{defaultS',defaultSIS\})\@nil%
  \extractkey/zx/wires definition/.SIS=-andL config (default \{defaultS',defaultSIS\})\@nil%
  \extractkey/zx/wires definition/ISS=-andL config (default \{defaultS',defaultSIS,symmetry\})\@nil%
  \extractkey/zx/wires definition/SS.I=-andL config (default \{defaultS',defaultSIS,symmetry\})\@nil%
  \extractkey/zx/wires definition/I.SS=-andL config (default \{defaultS',defaultSIS,symmetry\})\@nil%
  \extractkey/zx/wires definition/SSI=-andL config (default \{defaultS',defaultSIS,symmetry\})\@nil%
  \makeatother
  \pgfmanualbody
  |ss| is similar to |s| except that we go from top to bottom instead of from left to right. The position of |.| says whether the wire is going bottom right (|ss.|) or bottom left (|.ss|).
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxX{\alpha} \ar[ss,rd] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[ss.,rd] \\
  & \zxZ{\beta}\\
  & \zxX{\beta} \ar[.ss,dl] \\
  \zxZ{\alpha}\\
  & \zxX{\beta} \ar[.ss={},dl] \\
  \zxZ{\alpha}\\
\end{ZX}
\end{codeexample}
  |I| forces the angle above (if in between the two |s|) or below (if on the same side as |.|) to be vertical.
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxX{\alpha} \ar[ss,rd] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[sIs.,rd] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[ss.I,rd] \\
  & \zxZ{\beta}\\
  & \zxX{\beta} \ar[.sIs,dl] \\
  \zxZ{\alpha}\\
  & \zxX{\beta} \ar[I.ss,dl] \\
  \zxZ{\alpha}\\
\end{ZX}
\end{codeexample}
  The |S| version forces the anchor on the vertical line to be on the boundary.
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxX{\alpha} \ar[SS,rd] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[SIS,rd] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[SSI,rd] \\
  & \zxZ{\beta}\\
  & \zxX{\beta} \ar[.sIs,dl] \\
  \zxZ{\alpha}\\
  & \zxX{\beta} \ar[I.ss,dl] \\
  \zxZ{\alpha}\\
\end{ZX}
\end{codeexample}
  As for |s|, it can be configured:
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxX{\alpha} \ar[rd,SIS] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[rd,SIS={1L=.4}] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[rd,SIS={1L=.8}] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[rd,SIS={1L=1,2L=1}] \\
  & \zxZ{\beta}\\
\end{ZX}
\end{codeexample}
\end{pgfmanualentry}

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/wires definition/N=-andL config (default defaultN)\@nil%
  \extractkey/zx/wires definition/N'=-andL config (default defaultN)\@nil%
  \extractkey/zx/wires definition/N.=-andL config (default defaultN)\@nil%
  \extractkey/zx/wires definition/-N=-andL config (default \{defaultN,defaultN-\})\@nil%
  \extractkey/zx/wires definition/-N'=-andL config (default \{defaultN,defaultN-\})\@nil%
  \extractkey/zx/wires definition/-N.=-andL config (default \{defaultN,defaultN-\})\@nil%
  \extractkey/zx/wires definition/N-=-andL config (default \{defaultN,defaultN-,symmetry\})\@nil%
  \extractkey/zx/wires definition/N'-=-andL config (default \{defaultN,defaultN-,symmetry\})\@nil%
  \extractkey/zx/wires definition/N.-=-andL config (default \{defaultN,defaultN-,symmetry\})\@nil%
  \extractkey/zx/wires definition/Nbase=-andL config (default defaultN)\@nil%
  \makeatother
  \pgfmanualbody
  |N| is used to create a left-to-right wire leaving at a wide angle and arriving at a wide angle (it is named |N| because it roughly has the shape of a capital |N|). In older versions, |'| and |.| were required to specify whether the wire should go up-right or down-right, but this is not needed anymore (we keep them for compatibility with the |IO| styles and in case some styles want to make a distinction later).
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxX{\alpha} \ar[N,rd] \\
  & \zxZ{\beta}\\
  & \zxZ{\alpha}\\
  \zxX{\beta} \ar[N,ru]
\end{ZX}
\end{codeexample}
  |-| forces the angle on the side of |-| to be horizontal.
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxX{\alpha} \ar[-N,rd] \\
  & \zxZ{\beta}\\
  & \zxZ{\alpha}\\
  \zxX{\beta} \ar[N-,ru]
\end{ZX}
\end{codeexample}
  Like other wires, it can be configured using |-| (horizontal relative position of the anchor points) and |L| (vertical relative position of the anchor points; make sure to have |-|$<$|L| to get an |N|-looking shape), |al={angle}{relative length}|\dots{}
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxX{\alpha} \ar[N,rd] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[N={L=1.2},rd] \\
  & \zxZ{\beta}
\end{ZX}
\end{codeexample}
  All these styles are based on |Nbase| (which should not be used directly), including styles like |<|. If you later wish to overwrite the |N|-like commands, but not the |<|-like ones, then change |N|. If you wish to also update the |<| commands, use |Nbase|.
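For instance, a purely illustrative way of changing the default parameters of all |N|-like wires document-wise is to override the |defaultN| key described in \cref{subsec:wirecustom} (the values below are arbitrary):
\begin{verse}
  |\tikzset{|\\
  |  /zx/args/-andL/.cd,|\\
  |  defaultN/.style={-=.2,L=1.1},|\\
  |}|
\end{verse}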
\end{pgfmanualentry} \begin{pgfmanualentry} \makeatletter \def\extrakeytext{style, } \extractkey/zx/wires definition/NN=-andL config (default \{defaultN,symmetry-L,defaultNN\})\@nil% \extractkey/zx/wires definition/NN.=-andL config (default \{defaultN,symmetry-L,defaultNN\})\@nil% \extractkey/zx/wires definition/.NN=-andL config (default \{defaultN,symmetry-L,defaultNN\})\@nil% \extractkey/zx/wires definition/NIN=-andL config (default \{defaultN,symmetry-L,defaultNN,defaultNIN\})\@nil% \extractkey/zx/wires definition/INN=-andL config (default \{defaultN,symmetry-L,defaultNN,defaultNIN,symmetry\})\@nil% \extractkey/zx/wires definition/NNI=-andL config (default \{defaultN,symmetry-L,defaultNN,defaultNIN,symmetry\})\@nil% \makeatother \pgfmanualbody Like |N| but for diagrams read up-to-down or down-to-up. The |.| are maintly used for backward compatibility with |IO| style. % \begin{codeexample}[width=3cm] % \begin{ZX} % \zxX{\alpha} \ar[NN,rd] \\ % & \zxZ{\beta}\\ % & \zxZ{\alpha}\\ % \zxX{\beta} \ar[NN,ru] % \end{ZX} % \end{codeexample} % |I| forces the angle on the side of |I| to be vertical. % \begin{codeexample}[width=3cm] % \begin{ZX} % \zxX{\alpha} \ar[NIN,rd] \\ % & \zxZ{\beta}\\ % & \zxZ{\alpha}\\ % \zxX{\beta} \ar[NNI,ru] % \end{ZX} % \end{codeexample} % It can be configured like |N| using |-|, |L|\dots{} \end{pgfmanualentry} \begin{pgfmanualentry} \makeatletter \def\extrakeytext{style, } \extractkey/zx/wires definition/<'=-andL config (default like N-)\@nil% \extractkey/zx/wires definition/<.=-andL config (default like N-)\@nil% \extractkey/zx/wires definition/'>=-andL config (default like -N)\@nil% \extractkey/zx/wires definition/.>=-andL config (default like -N)\@nil% %\extractkey/zx/wires definition/^.=-andL config (default 60)\@nil% %\extractkey/zx/wires definition/.^=-andL config (default 60)\@nil% \extractkey/zx/wires definition/'v=-andL config (default like INN)\@nil% \extractkey/zx/wires definition/v'=-andL config (default like NNI)\@nil% \makeatother \pgfmanualbody |<'| and |<.| are similar to |N-|, except that the anchor of the vertical line is put on the boundary (similarly for |*>| and |-N|, |*v*| and |INN|, and |*^*| and |NIN|: |.^| and |^.| were not possible to put in this documentation since the documentation package does not like the |^| character). The position of |'| and |.| does not really matters anymore in new versions, but for backward compatibility with |IO| styles, and maybe forward compatibility (another style may need this information), it's cleaner to put |.| or |'| on the direction of the wire. It also helps the reader of your diagrams to see the shape of the wire. \begin{codeexample}[width=0cm] \begin{ZX} \zxN{} & \zxZ{}\\ \zxX{} \ar[ru,<'] \ar[rd,<.] \\ \zxN{} & \zxZ{}\\ \end{ZX} \end{codeexample} \begin{codeexample}[width=0cm] \begin{ZX} \zxN{} & \zxZ{}\\ \zxX{} \ar[ru,.>] \ar[rd,'>] \\ \zxN{} & \zxZ{}\\ \end{ZX} \end{codeexample} \begin{codeexample}[width=0cm] \begin{ZX} \zxN{} & \zxFracX{\pi}{2} \ar[dl,.^] \ar[dr,^.] & \\ \zxZ{} & & \zxX{} \end{ZX} \end{codeexample} \begin{codeexample}[width=0cm] \begin{ZX} \zxZ{} & & \zxX{}\\ \zxN{} & \zxX{} \ar[ul,'v] \ar[ur,v'] & \end{ZX} \end{codeexample} \end{pgfmanualentry} \begin{pgfmanualentry} \makeatletter \def\extrakeytext{style, } \extractkey/zx/wires definition/3 dots=text (default =)\@nil% \extractkey/zx/wires definition/3 vdots=text (default =)\@nil% \makeatother \pgfmanualbody The styles put in the middle of the wire (without drawing the wire) $\dots$ (for |3 dots|) or $\vdots$ (for |3 vdots|). 
The dots are scaled according to |\zxScaleDots| and the text \meta{text} is written on the left. Use |&[\zxDotsCol]| and |\\[\zxDotsRow]| to properly adapt the spacing of columns and rows.
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxZ{\alpha} \ar[r,o'] \ar[r,o.] \ar[r,3 dots] \ar[d,3 vdots={$n$\,}] &[\zxDotsCol] \zxFracX{\pi}{2}\\[\zxDotsRow]
  \zxZ{\alpha} \rar & \zxFracX{\pi}{2}
\end{ZX}
\end{codeexample}
\end{pgfmanualentry}

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/wires definition/H=style (default {})\@nil%
  \extractkey/zx/wires definition/Z=style (default {})\@nil%
  \extractkey/zx/wires definition/X=style (default {})\@nil%
  \makeatother
  \pgfmanualbody
  Adds an |H| (Hadamard), |Z| or |X| node (without phase) in the middle of the wire. Widths of columns or rows should be adapted accordingly using |\zxNameRowcolFlatnot|, where |Name| is replaced by |H|, |S| (for ``spiders'', i.e.\ |X| or |Z|), |HS| (for both |H| and |S|) or |W|, |Rowcol| is either |Row| (for changing the row sep) or |Col| (for changing the column sep), and |Flatnot| is empty or |Flat| (if the wire is supposed to be a straight line, as it requires more space). For instance:
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxZ{\alpha} \ar[d] \ar[r,o',H] \ar[r,o.,H] &[\zxHCol] \zxX{\beta}\\
  \zxZ{\alpha} \ar[d,-o,X] \ar[d,o-,Z] \\[\zxHSRow]
  \zxX{\gamma}
\end{ZX}
\end{codeexample}
  The \meta{style} parameter can be used to add an additional \tikzname{} style to the nodes, notably a position using |\ar[rd,-N.,H={pos=.35}]|. The reason for using that is that the wires start inside the nodes; therefore the ``middle'' of the wire is closer to the node when the other side is on an empty node.
\begin{codeexample}[width=0pt]
\begin{ZX}[zx row sep=0pt]
  \zxN{} \ar[rd,-N.,H={pos=.35}] &[\zxwCol,\zxHCol] &[\zxwCol,\zxHCol] \zxN{} \\[\zxNRow]%%
  & \zxX{\alpha} \ar[ru,N'-,H={pos=1-.35}] \ar[rd,N.-,H={pos=1-.35}] & \\[\zxNRow]
  \zxN{} \ar[ru,-N',H={pos=.35}] & & \zxN{}
\end{ZX}
\end{codeexample}
  Note that it is possible to automatically start wires on the border of the node, but it is slower and creates other issues; see \cref{subsec:wiresInsideOutside} for more details. The second option (also presented in this section) is to manually define the |start anchor| and |end anchor|, but it can change the shape of the wire.
\end{pgfmanualentry}

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/wires definition/wire centered\@nil%
  \extractkey/zx/wires definition/wc\@nil%
  \extractkey/zx/wires definition/wire centered start\@nil%
  \extractkey/zx/wires definition/wcs\@nil%
  \extractkey/zx/wires definition/wire centered end\@nil%
  \extractkey/zx/wires definition/wce\@nil%
  \makeatother
  \pgfmanualbody
  Forces the wire to start at the center of the node (|wire centered start|, alias |wcs|), to end at the center of the node (|wire centered end|, alias |wce|), or both (|wire centered|, alias |wc|). This may be useful, for instance, in the old |IO| mode (see below) when nodes have different sizes (the result looks strange otherwise), or with some wires (like |C|) connected to |\zxNone+| (if possible, use |\zxNone| (without any embellishment) since it does not suffer from this issue, as it is a coordinate). See also |between none| to also increase the looseness when connecting only wires (use |between none| \emph{only} in |IO| mode).
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxZ{} \ar[IO,o',r] \ar[IO,o.,r] & \zxX{\alpha}\\
  \zxZ{} \ar[IO,o',r,wc] \ar[IO,o.,r,wc] & \zxX{\alpha}
\end{ZX}
\end{codeexample}
% Without |wc| (note that because there is no node, we need to use |&[\zxWCol]| (for columns) and |\\[\zxWRow]| (for rows) to get nicer spacing):
% \begin{codeexample}[width=3cm]
% \zx{\zxNone{} \rar &[\zxWCol] \zxNone{} \rar &[\zxWCol] \zxNone{} }
% \end{codeexample}
% With |wc|:
% \begin{codeexample}[width=3cm]
% \zx{\zxNone{} \rar[wc] &[\zxWCol] \zxNone{} \rar[wc] &[\zxWCol] \zxNone{}}
% \end{codeexample}
\end{pgfmanualentry}

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/wires definition/bezier=-andL config \@nil%
  \extractkey/zx/wires definition/bezier x=-andL config \@nil%
  \extractkey/zx/wires definition/bezier y=-andL config \@nil%
  \extractkey/zx/wires definition/bezier 4=\{x1\}\{y1\}\{x2\}\{y2\} \@nil%
  \extractkey/zx/wires definition/bezier 4 x=\{x1\}\{y1\}\{x2\}\{y2\} \@nil%
  \extractkey/zx/wires definition/bezier 4 y=\{x1\}\{y1\}\{x2\}\{y2\} \@nil%
  \makeatother
  \pgfmanualbody
  To create a bezier wire. These styles are not really meant to be used by the final user, because they are long to type and would not be changed document-wise when the style is changed, but most styles are based on them. For the |bezier 4 *| versions, the first two arguments are the relative position of the first anchor (|x| and |y| position), and the next two those of the second anchor. In the |bezier *| versions, |1-| is the relative |x| position of the first point and |1L| its relative |y| position, while |2-| and |2L| are for the second point (the advantage being that it is also possible to specify angles using |1al={angle}{length}|\dots{} as explained in the |-andL| style). They are said to be relative in the sense that |{0}{0}| is the coordinate of the first point, and |{1}{1}| that of the second point. The |bezier x| and |bezier 4 x| (respectively |y|) versions are useful when the nodes are supposed to be horizontally (respectively vertically) aligned: the distances are then expressed as a fraction of the horizontal (respectively vertical) distance between the two nodes. Using relative coordinates has the advantage that if the node positions are moved, the aspect of the wire does not change (it is just squeezed), while this is not true with |in|/|out| wires, which preserve angles but not shapes.
\end{pgfmanualentry}

\subsubsection{IO wires, the old styles}\label{subsub:IOwires}

\begin{stylekey}{/zx/wires definition/IO}
  As explained above, wires were first defined using |in|, |out| and |looseness|, but this turned out to be sometimes hard to use since the shape of the wire was changing depending on the position of the nodes. For example, consider the differences between the older version:
\begin{codeexample}[]
\begin{ZX}
  \zxN{} & \zxZ{} \\
  \zxZ{} \ar[ru,IO,N'] \ar[rd,IO,N.] &\\
  & \zxZ{} \\
\end{ZX}
\begin{ZX}
  \zxN{} & \zxZ{} \\[-3pt]
  \zxZ{} \ar[ru,IO,N'] \ar[rd,IO,N.] &\\[-3pt]
  & \zxZ{} \\
\end{ZX}
\begin{ZX}
  \zxN{} & \zxZ{} \\[-5pt]
  \zxZ{} \ar[ru,IO,N'] \ar[rd,IO,N.] &\\[-5pt]
  & \zxZ{} \\
\end{ZX}
\end{codeexample}
\begin{codeexample}[]
\begin{ZX}
  \zxN{} & \zxZ{} \\
  \zxZ{} \ar[ru,N'] \ar[rd,N.] &\\
  & \zxZ{} \\
\end{ZX} and the newer:
\begin{ZX}
  \zxN{} & \zxZ{} \\[-3pt]
  \zxZ{} \ar[ru,N'] \ar[rd,N.] &\\[-3pt]
  & \zxZ{} \\
\end{ZX}
\begin{ZX}
  \zxN{} & \zxZ{} \\[-5pt]
  \zxZ{} \ar[ru,N'] \ar[rd,N.]
&\\[-5pt]
  & \zxZ{} \\
\end{ZX}
\end{codeexample}
  Here is another example:
\begin{codeexample}[]
Before
\begin{ZX}
  \zxNone{} \ar[IO,C,d,wc] \ar[rd,IO,s] &[\zxWCol] \zxNone{} \\[\zxWRow]
  \zxNone{} \ar[ru,IO,s] & \zxNone{}
\end{ZX} after
\begin{ZX}
  \zxNone{} \ar[C,d] \ar[rd,s] &[\zxWCol] \zxNone{} \\[\zxWRow]
  \zxNone{} \ar[ru,s] & \zxNone{}
\end{ZX}
\end{codeexample}
  This example led to the creation of the |bn| style, in order to try to find appropriate looseness values depending on the case\dots{} but it is harder to use and the results are less predictable. The new method also allowed us to use |N| for both the |N.| and |N'| styles (however, we kept both versions for backward compatibility and in case we later want to make a distinction between wires going down or up). If you prefer the old styles, you can still use them by adding |IO,| in front of the style name (the styles are nested inside |IO|). Note however that the customization options are of course different.
\end{stylekey}

We now list the older |IO| styles:

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/wires definition/IO/C\@nil%
  \extractkey/zx/wires definition/IO/C.\@nil%
  \extractkey/zx/wires definition/IO/C'\@nil%
  \extractkey/zx/wires definition/IO/C-\@nil%
  \makeatother
  \pgfmanualbody
  |IO| mode for the |C| wire (used for Bell-like shapes).
\begin{codeexample}[]
Bell pair \zx{\zxNone{} \ar[d,IO,C] \\[\zxWRow] \zxNone{}} and funny graph
\begin{ZX}
  \zxX{} \ar[d,IO,C] \ar[r,C'] & \zxZ{} \ar[d,IO,C-]\\
  \zxZ{} \ar[r,IO,C.] & \zxX{}
\end{ZX}.
\end{codeexample}
  Note that the |IO| version cannot really be used when nodes are not aligned (using |wc| can sometimes help with the alignment):
\begin{codeexample}[]
Normal
\begin{ZX}
  \zxX{\alpha} \ar[dr,C]\\
  & \zxNone{}
\end{ZX}, and IO
\begin{ZX}
  \zxX{\alpha} \ar[dr,IO,C]\\
  & \zxNone{}
\end{ZX}
\end{codeexample}
\end{pgfmanualentry}

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/wires definition/IO/o'=angle (default 40)\@nil%
  \extractkey/zx/wires definition/IO/o.=angle (default 40)\@nil%
  \extractkey/zx/wires definition/IO/o-=angle (default 40)\@nil%
  \extractkey/zx/wires definition/IO/-o=angle (default 40)\@nil%
  \makeatother
  \pgfmanualbody
  |IO| version of |o|. Curved wire, similar to |C| but with a softened angle (optionally specified via \meta{angle}, and globally editable with |\zxDefaultSoftAngleO|). Again, the symbols specify which part of the circle (represented with |o|) must be kept.
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxX{} \ar[d,IO,-o] \ar[d,IO,o-]\\
  \zxZ{} \ar[r,IO,o'] \ar[r,IO,o.] & \zxX{}
\end{ZX}.
\end{codeexample}
  Note that these wires can be combined with |H|, |X| or |Z|; in that case one should use appropriate column and row spacing, as explained in their documentation:
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxX{\alpha} \ar[d,IO,-o,H] \ar[d,IO,o-,H]\\[\zxHRow]
  \zxZ{\beta} \rar & \zxZ{} \ar[r,IO,o',X] \ar[r,IO,o.,Z] &[\zxSCol] \zxX{}
\end{ZX}.
\end{codeexample}
\end{pgfmanualentry}

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/wires definition/IO/(=angle (default 30)\@nil%
  \extractkey/zx/wires definition/IO/)=angle (default 30)\@nil%
  \extractkey/zx/wires definition/IO/('=angle (default 30)\@nil%
  \extractkey/zx/wires definition/IO/(.=angle (default 30)\@nil%
  \makeatother
  \pgfmanualbody
  |IO| version of |(| (so far they are the same, but this may change later; use this version if you want to play with |looseness|). Curved wire, similar to |o| but usable for diagonal items.
The angle is, like in |bend right|, the opening angle from the line which links the two nodes. For the first two commands, the |(| and |)| symbols must be imagined as if the starting point were on top of the parens, and the ending point at the bottom.
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxX{} \ar[rd,IO,(] \ar[rd,IO,),red]\\
  & \zxZ{}
\end{ZX}.
\end{codeexample}
  Then, |('|=|(| and |(.|=|)|; this notation is, I think, more intuitive when linking nodes from left to right. |('| is used when going to the top right and |(.| when going to the bottom right.
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxN{} & \zxX{}\\
  \zxZ{} \ar[ru,IO,('] \ar[IO,rd,(.] & \\
  & \zxX{}
\end{ZX}
\end{codeexample}
  When the nodes are too far apart, the default angle of |30| may produce strange results, as the wire will go above the vertical line (for |('|). Either choose a smaller angle, or see |<'| instead.
\end{pgfmanualentry}

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/wires definition/IO/s\@nil%
  \extractkey/zx/wires definition/IO/s'=angle (default 30)\@nil%
  \extractkey/zx/wires definition/IO/s.=angle (default 30)\@nil%
  \extractkey/zx/wires definition/IO/-s'=angle (default 30)\@nil%
  \extractkey/zx/wires definition/IO/-s.=angle (default 30)\@nil%
  \extractkey/zx/wires definition/IO/s'-=angle (default 30)\@nil%
  \extractkey/zx/wires definition/IO/s.-=angle (default 30)\@nil%
  \makeatother
  \pgfmanualbody
  |IO| version of |s|. |s| is used to create an s-like wire, to have nicer, softened diagonal lines between nodes. The other versions are softened versions (the input and output angles are not as sharp, and the angle difference can be configured as an argument or globally using |\zxDefaultSoftAngleS|). Adding |'| or |.| specifies whether the wire is going up-right or down-right.
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxX{\alpha} \ar[IO,s,rd] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[IO,s.,rd] \\
  & \zxZ{\beta}\\
  & \zxZ{\alpha}\\
  \zxX{\beta} \ar[IO,s,ru] \\
  & \zxZ{\alpha}\\
  \zxX{\beta} \ar[IO,s',ru] \\
\end{ZX}
\end{codeexample}
  |-| forces the angle on the side of |-| to be horizontal.
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxX{\alpha} \ar[IO,s.,rd] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[IO,-s.,rd] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[IO,s.-,rd] \\
  & \zxZ{\beta}\\
\end{ZX}
\end{codeexample}
\end{pgfmanualentry}

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/wires definition/IO/ss\@nil%
  \extractkey/zx/wires definition/IO/ss.=angle (default 30)\@nil%
  \extractkey/zx/wires definition/IO/.ss=angle (default 30)\@nil%
  \extractkey/zx/wires definition/IO/sIs.=angle (default 30)\@nil%
  \extractkey/zx/wires definition/IO/.sIs=angle (default 30)\@nil%
  \extractkey/zx/wires definition/IO/ss.I-=angle (default 30)\@nil%
  \extractkey/zx/wires definition/IO/I.ss-=angle (default 30)\@nil%
  \makeatother
  \pgfmanualbody
  |IO| version of |ss|. |ss| is similar to |s| except that we go from top to bottom instead of from left to right. The position of |.| says whether the wire is going bottom right (|ss.|) or bottom left (|.ss|).
\begin{codeexample}[width=3cm]
\begin{ZX}
  \zxX{\alpha} \ar[IO,ss,rd] \\
  & \zxZ{\beta}\\
  \zxX{\alpha} \ar[IO,ss.,rd] \\
  & \zxZ{\beta}\\
  & \zxX{\beta} \ar[IO,.ss,dl] \\
  \zxZ{\alpha}\\
  & \zxX{\beta} \ar[IO,.ss,dl] \\
  \zxZ{\alpha}\\
\end{ZX}
\end{codeexample}
  |I| forces the angle above (if in between the two |s|) or below (if on the same side as |.|) to be vertical.
\begin{codeexample}[width=3cm] \begin{ZX} \zxX{\alpha} \ar[IO,ss,rd] \\ & \zxZ{\beta}\\ \zxX{\alpha} \ar[IO,sIs.,rd] \\ & \zxZ{\beta}\\ \zxX{\alpha} \ar[IO,ss.I,rd] \\ & \zxZ{\beta}\\ & \zxX{\beta} \ar[IO,.sIs,dl] \\ \zxZ{\alpha}\\ & \zxX{\beta} \ar[IO,I.ss,dl] \\ \zxZ{\alpha}\\ \end{ZX} \end{codeexample} \end{pgfmanualentry} \begin{pgfmanualentry} \makeatletter \def\extrakeytext{style, } \extractkey/zx/wires definition/IO/<'=angle (default 60)\@nil% \extractkey/zx/wires definition/IO/<.=angle (default 60)\@nil% \extractkey/zx/wires definition/IO/'>=angle (default 60)\@nil% \extractkey/zx/wires definition/IO/.>=angle (default 60)\@nil% %\extractkey/zx/wires definition/IO/^.=angle (default 60)\@nil% %\extractkey/zx/wires definition/IO/.^=angle (default 60)\@nil% \extractkey/zx/wires definition/IO/'v=angle (default 60)\@nil% \extractkey/zx/wires definition/IO/v'=angle (default 60)\@nil% \makeatother \pgfmanualbody These keys are a bit like |('| or |(.| but the arrival angle is vertical (or horizontal for the |^| (up-down) and |v| (down-up) versions). As before, the position of the decorator |.|,|'| denote the direction of the wire. \begin{codeexample}[width=0cm] \begin{ZX} \zxN{} & \zxZ{}\\ \zxX{} \ar[IO,ru,<'] \ar[IO,rd,<.] \\ \zxN{} & \zxZ{}\\ \end{ZX} \end{codeexample} \begin{codeexample}[width=0cm] \begin{ZX} \zxN{} & \zxFracX{\pi}{2} \ar[IO,dl,.^] \ar[IO,dr,^.] & \\ \zxZ{} & & \zxX{} \end{ZX} \end{codeexample} \begin{codeexample}[width=0cm] \begin{ZX} \zxZ{} & & \zxX{}\\ \zxN{} & \zxX{} \ar[IO,ul,'v] \ar[IO,ur,v'] & \end{ZX} \end{codeexample} \end{pgfmanualentry} \begin{pgfmanualentry} \makeatletter \def\extrakeytext{style, } \extractkey/zx/wires definition/IO/N'=angle (default 60)\@nil% \extractkey/zx/wires definition/IO/N.=angle (default 60)\@nil% \extractkey/zx/wires definition/IO/-N'=angle (default 60)\@nil% \extractkey/zx/wires definition/IO/-N.=angle (default 60)\@nil% \extractkey/zx/wires definition/IO/N'-=angle (default 60)\@nil% \extractkey/zx/wires definition/IO/N.-=angle (default 60)\@nil% \makeatother \pgfmanualbody |IO| version of |N|. |N| is used to create a wire leaving at wide angle and arriving at wide angle. Adding |'| or |.| specifies if the wire is going up-right or down-right. \begin{codeexample}[width=3cm] \begin{ZX} \zxX{\alpha} \ar[IO,N.,rd] \\ & \zxZ{\beta}\\ & \zxZ{\alpha}\\ \zxX{\beta} \ar[IO,N',ru] \\ \end{ZX} \end{codeexample} |-| forces the angle on the side of |-| to be horizontal. \begin{codeexample}[width=3cm] \begin{ZX} \zxX{\alpha} \ar[IO,-N.,rd] \\ & \zxZ{\beta}\\ & \zxZ{\alpha}\\ \zxX{\beta} \ar[IO,N'-,ru] \\ \end{ZX} \end{codeexample} \end{pgfmanualentry} \begin{stylekey}{/zx/wires definition/ls=looseness} Shortcut for |looseness|, allows to quickly redefine looseness. Use with care (or redefine style directly), and \emph{do not use on styles that are not in |IO|}, since they don't use the |in|/|out| mechanism (only |(|-like styles use |in|/|out|\dots{} for now. In case you want to change looseness of |(|, prefer to use |IO,(| as it is guaranteed to be backward compatible). We may try later to give a key |looseness| for these styles, but it's not the case for now. 
For |IO| styles, you can also change other values yourself, like |in|, |out|\dots
\begin{codeexample}[]
\begin{ZX}
  \zxZ{} \ar[rd,s] \\
  & \zxX{}\\
  \zxZ{} \ar[rd,IO,s] \\
  & \zxX{}\\
  \zxZ{} \ar[rd,IO,s,ls=3] \\
  & \zxX{}
\end{ZX}
\end{codeexample}
\end{stylekey}

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/wires definition/between none\@nil%
  \extractkey/zx/wires definition/bn\@nil%
  \makeatother
  \pgfmanualbody
  When drawing only |IO| wires (normal wires would suffer from this parameter), the default looseness may not look good and holes may appear in the line. This style (whose alias is |bn|) should therefore be used when curved wires \emph{from the |IO| path} are connected together. In that case, also make sure to separate columns using |&[\zxWCol]| and rows using |\\[\zxWRow]|.
\begin{codeexample}[width=3cm]
A swapped Bell pair is %
\begin{ZX}
  \zxNone{} \ar[IO,C,d,wc] \ar[rd,IO,s,bn] &[\zxWCol] \zxNone{} \\[\zxWRow]
  \zxNone{} \ar[ru,IO,s,bn] & \zxNone{}
\end{ZX}
\end{codeexample}
\end{pgfmanualentry}

\subsection{Externalize}

Since 2022/02/08, it is possible to use the \tikzname{} |external| library to save compilation time. To load it, you need to add the following \tikzname{} libraries:
\begin{verse}
  |\usetikzlibrary{external}|\\
  |\usetikzlibrary{zx-calculus}|\\
  |\tikzexternalize|\\
  |\zxConfigureExternalSystemCallAuto|\\
\end{verse}
Then, compile with shell escape enabled, for instance using |pdflatex -shell-escape yourfile.tex|. \textbf{WARNING}: if |external| is loaded before |zx-calculus|, you don't need to run |\zxConfigureExternalSystemCallAuto|. This command is only useful to ensure that the system call used by |external| displays errors appropriately, by configuring the interaction mode to match the one used by the parent compilation command. If you prefer to disable this feature and use |external|'s default, define |\def\zxDoNotPatchSystemCall{}| before loading the |zx-calculus| library.

\alertwarning{Note however that this has not yet been extensively tested, and the \texttt{external} library has a few caveats, presented below.}
\begin{itemize}
\item If you change the order of the diagrams, or add a diagram in the middle of the document, all subsequent diagrams will be recompiled. This issue has been reported \mylink{https://github.com/pgf-tikz/pgf/issues/758}{here} and is caused by the fact that the figures are called \texttt{figure0},\dots,\texttt{figureN}. To limit this issue, you can regularly insert \texttt{\textbackslash{}tikzsetfigurename\{nameprefix\}} in your document with different names to avoid a full recompilation of the file (or use groups to change it for a single newly added equation).
\item The \texttt{external} library uses the main file to recompile each picture, so if your file is large or loads a lot of libraries, it may \mylink{https://tex.stackexchange.com/questions/633175/tikz-externalize-is-much-slower-than-tikz-on-first-run}{take a while to compile a single diagram} (to give an example, this library takes 41 seconds to compile without the externalize library; with the externalize library it takes 14 minutes for the first run and 3 seconds for the next runs). For this reason, you may want to use \texttt{\textbackslash{}tikzset\{external/export=false\}} in a group to temporarily disable the external library while you are writing your diagram.
\item If you compile a diagram once without any error, and recompile it after inserting an error, you will see an error while compiling.
But if you recompile again, the error will disappear and the last version of the diagram that compiled successfully will be inserted instead of the new, buggy one. This has been reported \mylink{https://github.com/pgf-tikz/pgf/issues/1137}{here}.
\item The |external| library forces us to wrap our diagrams into basically empty tikz-pictures to make it work. The current library will automatically wrap the diagrams when the |external| library is enabled, but you can customize how diagrams are wrapped manually: |\zxExternalAuto| (the default) will wrap them automatically if |external| is enabled, |\zxExternalWrap| will always wrap them, |\zxExternalNoWrap| will never wrap them (you will get errors if you use |external|) and |\zxExternalNoWrapNoExt| will not wrap the figure, but will temporarily disable the externalization for diagrams to avoid errors.
\item Sometimes, it seems that |external| cuts some parts of the picture, I guess when parts are drawn outside of the bounding box. I have not experienced that with the |zx-calculus| library directly, but the example below with the |double copy shadow| has such issues, where a part of the shadow is cut (I guess the shadow is drawn outside of the bounding box).
\item I experienced issues when using \texttt{external} with subfiles and biblatex. I will need to investigate this further at some point. Also, it seems that sometimes the inner sep of some labels in |external| mode defaults to zero (see the CNOT example below), I'm not sure why. Adding the value explicitly to the label fixes this.
\end{itemize}

\section{Advanced styling}

\subsection{Overlaying or creating styles}

It is possible to arbitrarily customize the styling, and to create or update ZX or \tikzname{} styles\dots{} First, any option that can be given to a |tikz-cd| matrix can also be given to a |ZX| environment (we refer to the manual of |tikz-cd| for more details). We also provide overlays to quickly modify the ZX style.

\begin{stylekey}{/zx/default style nodes}
  This is where the default style must be loaded. By default, it simply loads the (nested) style shipped with this library, |/zx/styles/rounded style|. You can change the style here if you would like to change a style globally. Note that a style must typically define at least |zxZ4|, |zxX4|, |zxFracZ6|, |zxFracX6|, |zxH|, |zxHSmall|, |zxNoPhaseSmallZ|, |zxNoPhaseSmallX|, |zxNone{,+,-,I}|, |zxNoneDouble{,+,-,I}| and all the |phase in label*|, |pil*| styles (see the code on how to define them). Because the above styles (notably |zxZ*| and |zxFrac*|) are slightly complex to define (this is needed in order to implement |phase in label|, the |-| versions\dots{}), it may take quite a while to implement them all properly yourself. For that reason, it may be easier to load our default style and overlay only some of the styles we use (see the example in |/zx/user overlay nodes| right after). You can check our code in |/zx/styles/rounded style| to see what you can redefine (intuitively, styles like |my style name| should be callable by the end user, |myStyleName| may be redefined by users or used in tikzit, and |my@style@name| styles should not be touched by the user). The styles of most interest are |zxNoPhase| (for Z and X nodes without any phase inside), |zxShort| (for Z and X nodes, typically for fractions), |zxLong| (for other Z and X nodes) and |stylePhaseInLabel| (for labels when using |phase in label|).
These basic styles are extended to add colors (just add |Z|/|X| after the name), like |zxNoPhaseZ|\dots{} You can change them, but if you just want to change the color, prefer to redefine |colorZxZ|/|colorZxX| instead (note that this color does not change |stylePhaseInLabelZ/X|, so you are free to redefine these styles as well). All the above styles can however be called from inside a tikzit style, if you want to use tikzit internally (in that case, make sure to load this library in |*.tikzdefs|). Note however that you should avoid calling these styles from inside |\zx{...}|, since |\zx*| and |\zxFrac*| are supposed to automatically choose the right style for you depending on the mode (fractions, labels in phase\dots{}). For more details, we encourage advanced users to look at the code of the library; examples of simple changes are presented now.
\end{stylekey}

\begin{stylekey}{/zx/user overlay nodes}
  If you just want to overlay some parts of the node styles, add your changes here.
\begin{codeexample}[]
{\tikzset{
    /zx/user overlay nodes/.style={
      zxH/.append style={dashed,inner sep=2mm}
    }}
  \zx{\zxNone{} \rar & \zxH{} \rar & \zxNone{}}
}
\end{codeexample}
  You can also change it on a per-diagram basis:
\begin{codeexample}[]
\zx[text=yellow,/zx/user overlay nodes/.style={
    zxSpiders/.append style={thick,draw=purple}}
]{\zxX{} \rar & \zxX{\alpha} \rar & \zxFracZ-{\pi}{2}}
\end{codeexample}
  The list of keys that can be changed is given below in |/zx/styles/rounded style/*|.
\end{stylekey}

\begin{stylekey}{/zx/user overlay}
  This key will be loaded as if it were inside the options of the picture |\zx[options]{...}|. To avoid always typing |\zx[content vertically centered]{...}|, you can therefore use:
\begin{codeexample}[width=0pt]
\tikzset{
  /zx/user overlay/.style={
    content vertically centered,
  },
}
\begin{ZX}
  \zxX{\alpha} & \zxX{\beta} & \zxX{a} & \zxX{b} & \zxX*{a\pi} & \zxX*{b\pi}
  & \zxX*{b'\pi} & \zxX*{'b\pi} & \zxZ{(a \oplus b )\pi}
\end{ZX}
\end{codeexample}
\end{stylekey}

\begin{stylekey}{/zx/default style wires}
  Default style for wires. Note that |/zx/wires definition/| is always loaded by default, and we don't add any other style for wires by default. But additional styles may use this functionality.
\end{stylekey}

\begin{stylekey}{/zx/user overlay wires}
  The user can add additional styles for wires here.
\begin{codeexample}[]
\begin{ZX}[/zx/user overlay wires/.style={thick,->,C/.append style={dashed}}]
  \zxNone{} \ar[d,C] \rar[] &[\zxWCol] \zxNone{}\\[\zxWRow]
  \zxNone{} \rar[] & \zxNone{}
\end{ZX}
\end{codeexample}
\end{stylekey}

\begin{stylekey}{/zx/styles/rounded style}
  This is the style loaded by default. It internally contains other (nested) styles that must be defined for any custom style.
\end{stylekey}

We now present all the properties that a new node style must have (and that can be overlayed as explained above).

\begin{stylekey}{/zx/styles/rounded style/zxAllNodes}
  Style applied to all nodes.
\end{stylekey}

\begin{stylekey}{/zx/styles/rounded style/zxEmptyDiagram}
  Style to draw an empty diagram.
\end{stylekey}

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/styles/rounded style/zxNone\@nil%
  \extractkey/zx/styles/rounded style/zxNone+\@nil%
  \extractkey/zx/styles/rounded style/zxNone-\@nil%
  \extractkey/zx/styles/rounded style/zxNoneI\@nil%
  \makeatother
  \pgfmanualbody
  Styles for None nodes (no inner sep, useful to connect wires). The |-|, |I| and |+| versions add horizontal, vertical, or both kinds of extra space, respectively.
\end{pgfmanualentry}

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/styles/rounded style/zxNoneDouble\@nil%
  \extractkey/zx/styles/rounded style/zxNoneDouble+\@nil%
  \extractkey/zx/styles/rounded style/zxNoneDouble-\@nil%
  \extractkey/zx/styles/rounded style/zxNoneDoubleI\@nil%
  \makeatother
  \pgfmanualbody
  Like |zxNone|, but with more space to fake two nodes on a single line (not used much).
\end{pgfmanualentry}

\begin{stylekey}{/zx/styles/rounded style/zxSpiders}
  Style that applies to all circle spiders.
\end{stylekey}

\begin{stylekey}{/zx/styles/rounded style/zxNoPhase}
  Style that applies to spiders without any angle inside. Used by |\zxX{}| when the argument is empty.
\end{stylekey}

\begin{stylekey}{/zx/styles/rounded style/zxNoPhaseSmall}
  Like |zxNoPhase| but for spiders drawn in between wires.
\end{stylekey}

\begin{stylekey}{/zx/styles/rounded style/zxShort}
  Spider with text but no inner space. Used notably to obtain nice fractions.
\end{stylekey}

\begin{stylekey}{/zx/styles/rounded style/zxLong}
  Spider with potentially large text. Used by |\zxX{\alpha}| when the argument is not empty.
\end{stylekey}

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/styles/rounded style/zxNoPhaseZ\@nil%
  \extractkey/zx/styles/rounded style/zxNoPhaseX\@nil%
  \extractkey/zx/styles/rounded style/zxNoPhaseSmallZ\@nil%
  \extractkey/zx/styles/rounded style/zxNoPhaseSmallX\@nil%
  \extractkey/zx/styles/rounded style/zxShortZ\@nil%
  \extractkey/zx/styles/rounded style/zxShortX\@nil%
  \extractkey/zx/styles/rounded style/zxLongZ\@nil%
  \extractkey/zx/styles/rounded style/zxLongX\@nil%
  \makeatother
  \pgfmanualbody
  Like the above styles, but with the colors of the |X| and |Z| spiders added. The colors can be changed globally by updating the |colorZxX| (and similar) colors. By default we use:
\begin{verse}
  |\definecolor{colorZxZ}{RGB}{204,255,204}|\\
  |\definecolor{colorZxX}{RGB}{255,136,136}|\\
  |\definecolor{colorZxH}{RGB}{255,255,0}|
\end{verse}
  as per the second recommendation in \href{https://zxcalculus.com/accessibility.html}{\texttt{zxcalculus.com/accessibility.html}}.
\end{pgfmanualentry}

\begin{stylekey}{/zx/styles/rounded style/zxH}
  Style for Hadamard spiders, used by |\zxH{}|; it uses the color |colorZxH|.
\end{stylekey}

\begin{stylekey}{/zx/styles/rounded style/zxHSmall}
  Like |zxH| but for Hadamard nodes on wires (see the |H| style).
\end{stylekey}

\begin{pgfmanualentry}
  \extractcommand\zxConvertToFracInContent\marg{sign}\marg{num no parens}\marg{denom no parens}\marg{num parens}\marg{denom parens}\@@
  \extractcommand\zxConvertToFracInLabel\@@
  \pgfmanualbody
  These functions are not meant to be called directly, but to be redefined using something like (we use |\zxMinus| as a shorter minus compared to $-$):
\begin{verse}
  |\RenewExpandableDocumentCommand{\zxConvertToFracInLabel}{mmmmm}{%|\\
  | \ifthenelse{\equal{#1}{-}}{\zxMinus}{#1}\frac{#2}{#3}%|\\
  |}|
\end{verse}
  This is used to change how the library does the conversion between |\zxFrac| and the actual written text (either in the node content or in the label, depending on the function). The first argument is the sign (the string |-| for minus; anything else must be written in place of the minus), the second and third arguments are the numerator and denominator of the fraction when used in |\frac{}{}|, while the last two arguments are the same except that they include the parens which should be added when using an inline version. For instance, one could get a call |\zxConvertToFracInLabel{-}{a+b}{c+d}{(a+b)}{(c+d)}|. See the part on labels for an example of use.
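A similar redefinition works for the in-content version. For instance, here is a purely illustrative sketch that switches the node content to |\tfrac| (same signature as above, only the fraction command changes):
\begin{verse}
  |\RenewExpandableDocumentCommand{\zxConvertToFracInContent}{mmmmm}{%|\\
  | \ifthenelse{\equal{#1}{-}}{\zxMinus}{#1}\tfrac{#2}{#3}%|\\
  |}|
\end{verse}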
\end{pgfmanualentry} \begin{pgfmanualentry} \def\extrakeytext{style, } \extractcommand\zxMinusUnchanged\@@ \extractcommand\zxMinus\@@ \extractcommand\zxMinusInShort\@@ \makeatletter% should not be letter for \@@... strange \extractkey/zx/defaultEnv/zx column sep=length\@nil% \extractkey/zx/styles/rounded style preload/small minus\@nil% \extractkey/zx/styles/rounded style preload/big minus\@nil% \extractkey/zx/styles/rounded style/small minus\@nil% \extractkey/zx/styles/rounded style/big minus\@nil% \makeatother \pgfmanualbody |\zxMinus| is the minus sign used in fractions, |\zxMinusInShort| is used in |\zxZ-{\alpha}| and |\zxMinusUnchanged| is a minus sign shorter than $-$. You can redefine them, for instance: \begin{codeexample}[] Compare {\def\zxMinusInShort{-} \zx{\zxZ-{\alpha}} } and {\def\zxMinusInShort{\zxMinus} \zx{\zxZ-{\alpha}} } \end{codeexample} You can also choose to always use a big or a small minus, either on a per-node, per-figure, or document-wise. \begin{codeexample}[] \begin{ZX} \zxFracZ-{\pi}{4} & \zxZ-{\alpha} & \zxZ-{\delta_i} & \zxZ[small minus]-{\delta_i} \end{ZX} Picture-wise % \begin{ZX}[small minus] \zxZ-{\alpha} & \zxZ-{\delta_i} & \zxZ-{\delta_i} & \zxFracZ-{\pi}{4} \end{ZX} Document-wise % \tikzset{/zx/user overlay/.style={small minus}}% \begin{ZX} \zxZ-{\alpha} & \zxZ-{\delta_i} & \zxZ[big minus]-{\delta_i} & \zxFracZ[big minus]-{\pi}{4} \end{ZX} \end{codeexample} \end{pgfmanualentry} \noindent We also define several spacing commands that can be redefined to your needs: \begin{pgfmanualentry} \extractcommand\zxHCol{}\@@ \extractcommand\zxHRow{}\@@ \extractcommand\zxHColFlat{}\@@ \extractcommand\zxHRowFlat{}\@@ \extractcommand\zxSCol{}\@@ \extractcommand\zxSRow{}\@@ \extractcommand\zxSColFlat{}\@@ \extractcommand\zxSRowFlat{}\@@ \extractcommand\zxHSCol{}\@@ \extractcommand\zxHSRow{}\@@ \extractcommand\zxHSColFlat{}\@@ \extractcommand\zxHSRowFlat{}\@@ \extractcommand\zxWCol{}\@@ \extractcommand\zxWRow{}\@@ \extractcommand\zxwCol{}\@@ \extractcommand\zxwRow{}\@@ \extractcommand\zxDotsCol{}\@@ \extractcommand\zxDotsRow{}\@@ \extractcommand\zxZeroCol{}\@@ \extractcommand\zxZeroRow{}\@@ \extractcommand\zxNCol{}\@@ \extractcommand\zxNRow{}\@@ \pgfmanualbody These are spaces, to use like |&[\zxHCol]| or |\\[\zxHRow]| in order to increase the default spacing of rows and columns depending on the style of the wire. |H| stands for Hadamard, |S| for Spiders, |W| for Wires only, |w| is you link a |zxNone| to a spider (goal is to increase the space), |N| is when you have a |\zxN| and want to reduce the space between columns, |HS| for both Spiders and Hadamard, |Dots| for the 3 dots styles, |Zero| completely resets the default column sep. And of course |Col| for columns, |Row| for rows. \begin{codeexample}[width=3cm] \begin{ZX} \zxN{} \ar[rd,-N.] &[\zxwCol] &[\zxwCol] \zxN{} \\[\zxNRow]%% & \zxX{\alpha} \ar[ru,N'-] \ar[rd,N.-] & \\[\zxNRow] \zxN{} \ar[ru,-N'] & & \zxN{} \end{ZX} \end{codeexample} Note that you can add multiple of them by separating with commas (see |\pgfmatrixnextcell|'s documentation for more details). For instance to have a column separation of exactly |2mm|, do |&[\zxZeroCol,2mm]| (if you just do |&[2mm]| the column will be |2mm| larger). \end{pgfmanualentry} \begin{pgfmanualentry} \def\extrakeytext{style, } \extractcommand\zxDefaultColumnSep{}\@@ \extractcommand\zxDefaultRowSep{}\@@ \makeatletter% should not be letter for \@@... 
  \extractkey/zx/defaultEnv/zx column sep=length\@nil%
  \extractkey/zx/defaultEnv/zx row sep=length\@nil%
  \makeatother
  \pgfmanualbody
  |\zxDefaultColumn/RowSep| are the default column and row separations, and the corresponding styles can be used to change them for a single matrix. Prefer changing these parameters over changing the |row sep| and |column sep| (without |zx|) of the matrix directly, since other spacing styles like |\zxZeroCol| or |\zxNCol| depend on |\zxDefaultColumnSep|.
\end{pgfmanualentry}

\begin{pgfmanualentry}
  \extractcommand\zxDefaultSoftAngleS{}\@@
  \extractcommand\zxDefaultSoftAngleO{}\@@
  \extractcommand\zxDefaultSoftAngleChevron{}\@@
  \pgfmanualbody
  Default opening angles of the |S|, |o| and |v|/|<| wires. They default to $30$, $40$ and $45$ respectively.
\end{pgfmanualentry}

\subsection{Wire customization}\label{subsec:wirecustom}

\begin{pgfmanualentry}
  \makeatletter
  \def\extrakeytext{style, }
  \extractkey/zx/args/-andL/\@nil%
  \extractkey/zx/args/-andL/defaultO (default {-=.2,L=.4})\@nil%
  \extractkey/zx/args/-andL/defaultN (default {-=.2,L=.8})\@nil%
  \extractkey/zx/args/-andL/defaultN- (default {1-=.4,1L=0})\@nil%
  \extractkey/zx/args/-andL/defaultNN (default {})\@nil%
  \extractkey/zx/args/-andL/defaultNIN (default {1-=0,1L=.6})\@nil%
  \extractkey/zx/args/-andL/defaultS (default {-=.8,L=0})\@nil%
  \extractkey/zx/args/-andL/defaultS' (default {-=.8,L=.2})\@nil%
  \extractkey/zx/args/-andL/default-S (default {1-=.8,1L=0})\@nil%
  \extractkey/zx/args/-andL/defaultSIS (default {1-=0,1L=.8})\@nil%
  \makeatother
  \pgfmanualbody
  Default values used by the wires. You can customize them globally using something like:
\begin{verse}
  |\tikzset{|\\
  | /zx/args/-andL/.cd,|\\
  | defaultO/.style={-=.2,L=.4}|\\
  |}|
\end{verse}
  Basically, |defaultO| will configure all of the |o| family, |defaultS'| will configure all the ``soft'' versions of |s|, |default-S| will configure the anchor on the side of the vertical arrival\dots{} For more details on which wire uses which configuration, check the default value given in each style definition.
\end{pgfmanualentry}

\subsection{Wires starting inside or on the boundary of the node}\label{subsec:wiresInsideOutside}

This library provides multiple methods to draw the wires between the nodes (for all curves based on |bezier|, which is basically everything but |C| and straight lines).

\paragraph{Default drawing method.} By default, the lines are drawn behind the nodes and the starting and ending points are defined to be a |fake center *| anchor (if it exists; the exact anchor chosen (north, south\dots{}) depends on the direction). Because these anchors lie behind the node, we put the wires on the |edgelayer| layer. For debugging purposes, it can be practical to display them:
\begin{pgfmanualentry}
  \def\extrakeytext{style, }
  \extractcommand\zxEdgesAbove\@@
  \makeatletter% should not be letter for \@@... strange
  \extractkey/zx/wires definition/edge above\@nil%
  \extractkey/zx/wires definition/edge not above\@nil%
  \makeatother
  \pgfmanualbody
  If the macro |\zxEdgesAbove| is undefined (using |\let\zxEdgesAbove\undefined|), edges will be drawn above the nodes. To change it on a per-edge basis, use |edge above| (or its contrary |edge not above|) \emph{before the name of the wire}. This is mostly useful to understand how lines are drawn and for debugging purposes.
\begin{codeexample}[]
What you see: \zx{\zxZ{\alpha+\beta} \ar[dr,s] \\ & \zxZ{\alpha+\beta}}
What is drawn: \zx{\zxZ{\alpha+\beta} \ar[dr,edge above,s] \\ & \zxZ{\alpha+\beta}}
\end{codeexample}
  (You can note that the wire does not start at the center but at a |fake center *| anchor, to provide a nicer look.)
\end{pgfmanualentry}

\begin{pgfmanualentry}
  \def\extrakeytext{style, }
  \extractcommand\zxControlPointsVisible\@@
  \makeatletter% should not be letter for \@@... strange
  \extractkey/zx/wires definition/control points visible\@nil%
  \extractkey/zx/wires definition/control points not visible\@nil%
  \makeatother
  \pgfmanualbody
  Similarly, it can be useful for debugging to see the control points of the curves (note that |C|, straight lines and |(| wires are not based on our curve system, so it won't do anything for them). If the macro |\zxControlPointsVisible| is defined (using |\def\zxControlPointsVisible{}|), control points will be drawn. To change it on a per-edge basis, use |control points visible| (or its contrary |control points not visible|). This is mostly useful to understand how lines are drawn and for debugging purposes.
\begin{codeexample}[]
Controls not visible: \zx{\zxZ{\alpha+\beta} \ar[dr,s] \\ & \zxZ{\alpha+\beta}}
Control visible: \zx{\zxZ{\alpha+\beta} \ar[dr,control points visible,s] \\ & \zxZ{\alpha+\beta}}
Control visible + edge above: \zx{\zxZ{\alpha+\beta} \ar[dr,edge above,control points visible,s] \\ & \zxZ{\alpha+\beta}}
\end{codeexample}
  \textbf{WARNING}: this command adds some points to the wire path; in particular, if you have an |H| wire (Hadamard in the middle of the wire), this option will not place it correctly. But this is not really a problem since it is only meant for quick debugging.
\end{pgfmanualentry}

Unfortunately, the default drawing method also has drawbacks.
For instance, when using the |H| edge between a spider and an empty node, the ``middle'' of the edge will appear too close to the center by default (we draw the first edge above to illustrate the reason of this visual artifact): \begin{codeexample}[width=0cm] \begin{ZX} \zxN{} \ar[rd,edge above,-N.,H] &[\zxwCol,\zxHCol] &[\zxwCol,\zxHCol] \zxN{} \\[\zxNRow]%% & \zxX{\alpha} \ar[ru,N'-,H] \ar[rd,N.-,H] & \\[\zxNRow] \zxN{} \ar[ru,-N',H] & & \zxN{} \end{ZX} \end{codeexample} To solve that issue, you need to manually position the |H| node as shown before: \begin{codeexample}[width=0cm] \begin{ZX} \zxN{} \ar[rd,edge above,-N.,H={pos=.35}] &[\zxwCol,\zxHCol] &[\zxwCol,\zxHCol] \zxN{} \\[\zxNRow]%% & \zxX{\alpha} \ar[ru,N'-,H={pos=1-.35}] \ar[rd,N.-,H={pos=1-.35}] & \\[\zxNRow] \zxN{} \ar[ru,-N',H={pos=.35}] & & \zxN{} \end{ZX} \end{codeexample} Or manually position the anchor outside the node (you can use angles, centered on the real center on the shape), but be aware that it can change the shape of the node (see below): \begin{codeexample}[width=0cm] \begin{ZX} \zxN{} \ar[rd,edge above,-N.,H,end anchor=180-45] &[\zxwCol,\zxHCol] &[\zxwCol,\zxHCol] \zxN{} \\[\zxNRow]%% & \zxX{\alpha} \ar[ru,N'-,H,start anchor=45] \ar[rd,N.-,H,start anchor=-45] & \\[\zxNRow] \zxN{} \ar[ru,-N',H,end anchor=180+45] & & \zxN{} \end{ZX} \end{codeexample} A second drawback is that it is not possible to add arrows on the curved wires (except |C| which uses a different approach), since they will be hidden behind the node: \begin{codeexample}[] What you see: \zx{\zxZ{\alpha+\beta} \ar[dr,s,<->] \\ & \zxZ{\alpha+\beta}} What is drawn: \zx{\zxZ{\alpha+\beta} \ar[dr,edge above,s,<->] \\ & \zxZ{\alpha+\beta}} \end{codeexample} Here, the only solution (without changing the drawing mode) is to manually position the anchor as before\dots{} but note that on nodes with a large content |45| degrees is actually nearly on the top since the angle is not taken from a fake center but from the real center of the node. \begin{codeexample}[] \zx{\zxZ{\alpha+\beta} \ar[dr,s,<->,start anchor=-45,end anchor=180-45] \\ & \zxZ{\alpha+\beta}} \zx{\zxZ{\alpha+\beta} \ar[dr,s,<->,start anchor=-15,end anchor=180-15] \\ & \zxZ{\alpha+\beta}} \end{codeexample} Note that the shape of the wire may be a bit different since the ending and leaving parts was hidden before, and the current styles are not designed to look nicely when starting on the border of a node. For that reason, you may need to tweak the style of the wire yourself using |-|, |L| options. \paragraph{The ``intersection'' drawing methods} We also define other modes to draw wires (they are very new and not yet tested a lot). In the first mode, appropriate |fake center *| is taken, then depending on the bezier control points, a point is taken on the border of the shape (starting from the fake center and using the direction of the bezier control point). Then the node is drawn. Here is how to enable this mode: \begin{pgfmanualentry} \def\extrakeytext{style, } \extractcommand\zxEnableIntersections\marg{}\@@ \extractcommand\zxDisableIntersections\marg{}\@@ \extractcommand\zxEnableIntersectionsNodes\@@ \extractcommand\zxEnableIntersectionsWires\@@ \makeatletter% should not be letter for \@@... 
strange \extractkey/zx/wires definition/use intersections\@nil% \extractkey/zx/wires definition/dont use intersections\@nil% \makeatother \pgfmanualbody The simplest method to enable or disable intersections is to call |\zxEnableIntersections{}| or |\zxDisableIntersections{}| (potentially in a group to have a local action only). Note that \emph{this does not automatically adapt the styles}; see |ui| to adapt the styles automatically. \begin{codeexample}[width=0cm] {% Enable intersections (but does not load our custom "intersections" style, see ui). \zxEnableIntersections{}% Small space left = artifact of the documentation \begin{ZX} \zxN{} \ar[rd,edge above,-N.,H] &[\zxwCol,\zxHCol] &[\zxwCol,\zxHCol] \zxN{} \\[\zxNRow]%% & \zxX{\alpha} \ar[ru,N'-,H] \ar[rd,N.-,H] & \\[\zxNRow] \zxN{} \ar[ru,-N',H] & & \zxN{} \end{ZX} } \end{codeexample} (The |edge above| is just to show that the wire does not go inside.) However, this method enables intersections for the whole drawing. You can disable it for a single arrow using the |dont use intersections| style. But it is possible instead to enable it for a single wire. To do that, first define |\def\zxEnableIntersectionsNodes{}| (it will automatically add a |name path| to each node; if you don't care about optimizations, you can just define it once at the beginning of your project), and then use |use intersections| on the wires which should use intersections: \begin{codeexample}[width=0cm] {% Create the machinery needed to compute intersections, but does not enable them. \def\zxEnableIntersectionsNodes{}% Small space left = artifact of the documentation \begin{ZX} \zxN{} \ar[rd,edge above,-N.,H, %% "use intersections" does not load any style, cf ui. use intersections] &[\zxwCol,\zxHCol] &[\zxwCol,\zxHCol] \zxN{} \\[\zxNRow]%% & \zxX{\alpha} \ar[ru,edge above,N'-,H,use intersections] \ar[rd,edge above,N.-,H] & \\[\zxNRow] \zxN{} \ar[ru,edge above,-N',H] & & \zxN{} \end{ZX} } \end{codeexample} \end{pgfmanualentry} \begin{pgfmanualentry} \def\extrakeytext{style, } \makeatletter% should not be letter for \@@... strange \extractkey/zx/wires definition/ui\@nil% \makeatother \pgfmanualbody This method has, however, a few drawbacks. One of the main reasons why we do not use it by default is that it is quite slow to compute (it involves the |intersections| library to obtain the bezier point to start at, and my code may also not be very well optimized as I'm a beginner with \LaTeX{} and \tikzname{} programming\dots{} and what a strange language!). Secondly, it has not yet been tested a lot. Note also that the default wire styles have not been optimized for this setup and the results may vary compared to the default drawing mode (sometimes they are ``better'', sometimes they are not). We have however tried to define a second style |/zx/args/-andL/ui/| that has nicer results. To load it, just type |ui| \emph{before the wire style name}; it will automatically load |use intersections| together with our custom styles (see below how to use |user overlay wires| to load it by default): %%% I'm not sure why, but inside codeexample if I write zxEnableIntersections{}%<go to line>\begin{ZX}... then an additional space is added... Not sure why.
\begin{codeexample}[] {% \def\zxEnableIntersectionsNodes{} Before \begin{ZX} \zxX{\beta} \ar[r,o'] & \zxX{} \end{ZX} after \begin{ZX} \zxX{\beta} \ar[r,o',use intersections] & \zxX{} \end{ZX} corrected manually \begin{ZX} \zxX{\beta} \ar[r,edge above, use intersections, o'={-=.2,L=.15}] & \zxX{} \end{ZX} or with our custom style \begin{ZX} \zxX{\beta} \ar[r,edge above, ui, o'] & \zxX{} \end{ZX}. } \end{codeexample} Here are further comparisons: \begin{codeexample}[] {% \def\zxEnableIntersectionsNodes{} Before \begin{ZX} \zxX{} \ar[r,o'] & \zxX{} \end{ZX} ui \begin{ZX} \zxX{} \ar[r, ui, o'] & \zxX{} \end{ZX}. Before \begin{ZX} \zxX{\alpha} \ar[r,o'] & \zxZ{\beta} \end{ZX} ui \begin{ZX} \zxX{\alpha} \ar[r, ui, o'] & \zxZ{\beta} \end{ZX}. Before \begin{ZX} \zxX{\alpha+\beta} \ar[r,o'] & \zxZ{\alpha+\beta} \end{ZX} ui \begin{ZX} \zxX{\alpha+\beta} \ar[r, ui, o'] & \zxZ{\alpha+\beta} \end{ZX}. } \end{codeexample} With |N|: \begin{codeexample}[] {% \def\zxEnableIntersectionsNodes{} \begin{ZX} \zxX{} \ar[rd,N]\\ & \zxX{} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{} \ar[rd,ui,N]\\ & \zxX{} \end{ZX}. \begin{ZX} \zxX{\alpha} \ar[rd,N]\\ & \zxX{} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha} \ar[rd,ui,N]\\ & \zxX{} \end{ZX}. \begin{ZX} \zxX{\alpha} \ar[rd,N]\\ & \zxX{\beta} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha} \ar[rd,ui,N]\\ & \zxX{\beta} \end{ZX}. \begin{ZX} \zxX{\alpha+\beta} \ar[rd,N]\\ & \zxX{\alpha+\beta} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha+\beta} \ar[rd,ui,N]\\ & \zxX{\alpha+\beta} \end{ZX}. } \end{codeexample} With |N-|: \begin{codeexample}[] {% \def\zxEnableIntersectionsNodes{} \begin{ZX} \zxX{} \ar[rd,N-]\\ & \zxX{} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{} \ar[rd,ui,N-]\\ & \zxX{} \end{ZX}. \begin{ZX} \zxX{\alpha} \ar[rd,N-]\\ & \zxX{} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha} \ar[rd,ui,N-]\\ & \zxX{} \end{ZX}. \begin{ZX} \zxX{\alpha} \ar[rd,N-]\\ & \zxX{\beta} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha} \ar[rd,ui,N-]\\ & \zxX{\beta} \end{ZX}. \begin{ZX} \zxX{\alpha+\beta} \ar[rd,N-]\\ & \zxX{\alpha+\beta} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha+\beta} \ar[rd,ui,N-]\\ & \zxX{\alpha+\beta} \end{ZX}. } \end{codeexample} With |<.|: \begin{codeexample}[] {% \def\zxEnableIntersectionsNodes{} \begin{ZX} \zxX{} \ar[rd,<.]\\ & \zxX{} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{} \ar[rd,ui,<.]\\ & \zxX{} \end{ZX}. \begin{ZX} \zxX{\alpha} \ar[rd,<.]\\ & \zxX{} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha} \ar[rd,ui,<.]\\ & \zxX{} \end{ZX}. \begin{ZX} \zxX{\alpha} \ar[rd,<.]\\ & \zxX{\beta} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha} \ar[rd,ui,<.]\\ & \zxX{\beta} \end{ZX}. \begin{ZX} \zxX{\alpha+\beta} \ar[rd,<.]\\ & \zxX{\alpha+\beta} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha+\beta} \ar[rd,ui,<.]\\ & \zxX{\alpha+\beta} \end{ZX}. } \end{codeexample} With |NIN|: \begin{codeexample}[] {% \def\zxEnableIntersectionsNodes{} \begin{ZX} \zxX{} \ar[rd,NIN]\\ & \zxX{} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{} \ar[rd,ui,NIN]\\ & \zxX{} \end{ZX}. \begin{ZX} \zxX{\alpha} \ar[rd,NIN]\\ & \zxX{} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha} \ar[rd,ui,NIN]\\ & \zxX{} \end{ZX}. \begin{ZX} \zxX{\alpha} \ar[rd,NIN]\\ & \zxX{\beta} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha} \ar[rd,ui,NIN]\\ & \zxX{\beta} \end{ZX}. \begin{ZX} \zxX{\alpha+\beta} \ar[rd,NIN]\\ & \zxX{\alpha+\beta} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha+\beta} \ar[rd,ui,NIN]\\ & \zxX{\alpha+\beta} \end{ZX}. 
} \end{codeexample} With this mode |s| behaves basically like |S| since the only difference is the anchor: \begin{codeexample}[] {% \def\zxEnableIntersectionsNodes{} \begin{ZX} \zxX{} \ar[rd,s]\\ & \zxX{} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{} \ar[rd,ui,s]\\ & \zxX{} \end{ZX}. \begin{ZX} \zxX{\alpha} \ar[rd,s]\\ & \zxX{} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha} \ar[rd,ui,s]\\ & \zxX{} \end{ZX}. \begin{ZX} \zxX{\alpha} \ar[rd,s]\\ & \zxX{\beta} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha} \ar[rd,ui,s]\\ & \zxX{\beta} \end{ZX}. \begin{ZX} \zxX{\alpha+\beta} \ar[rd,s]\\ & \zxX{\alpha+\beta} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha+\beta} \ar[rd,ui,s]\\ & \zxX{\alpha+\beta} \end{ZX}. } \end{codeexample} With |s'|: \begin{codeexample}[] {% \def\zxEnableIntersectionsNodes{} \begin{ZX} \zxX{} \ar[rd,s']\\ & \zxX{} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{} \ar[rd,ui,s']\\ & \zxX{} \end{ZX}. \begin{ZX} \zxX{\alpha} \ar[rd,s']\\ & \zxX{} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha} \ar[rd,ui,s']\\ & \zxX{} \end{ZX}. \begin{ZX} \zxX{\alpha} \ar[rd,s']\\ & \zxX{\beta} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha} \ar[rd,ui,s']\\ & \zxX{\beta} \end{ZX}. \begin{ZX} \zxX{\alpha+\beta} \ar[rd,s']\\ & \zxX{\alpha+\beta} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha+\beta} \ar[rd,ui,s']\\ & \zxX{\alpha+\beta} \end{ZX}. } \end{codeexample} With |-s|: \begin{codeexample}[] {% \def\zxEnableIntersectionsNodes{} \begin{ZX} \zxX{} \ar[rd,-s]\\ & \zxX{} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{} \ar[rd,ui,-s]\\ & \zxX{} \end{ZX}. \begin{ZX} \zxX{\alpha} \ar[rd,-s]\\ & \zxX{} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha} \ar[rd,ui,-s]\\ & \zxX{} \end{ZX}. \begin{ZX} \zxX{\alpha} \ar[rd,-s]\\ & \zxX{\beta} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha} \ar[rd,ui,-s]\\ & \zxX{\beta} \end{ZX}. \begin{ZX} \zxX{\alpha+\beta} \ar[rd,-s]\\ & \zxX{\alpha+\beta} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha+\beta} \ar[rd,ui,-s]\\ & \zxX{\alpha+\beta} \end{ZX}. } \end{codeexample} With |SIS|: \begin{codeexample}[] {% \def\zxEnableIntersectionsNodes{} \begin{ZX} \zxX{} \ar[rd,SIS]\\ & \zxX{} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{} \ar[rd,ui,SIS]\\ & \zxX{} \end{ZX}. \begin{ZX} \zxX{\alpha} \ar[rd,SIS]\\ & \zxX{} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha} \ar[rd,ui,SIS]\\ & \zxX{} \end{ZX}. \begin{ZX} \zxX{\alpha} \ar[rd,SIS]\\ & \zxX{\beta} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha} \ar[rd,ui,SIS]\\ & \zxX{\beta} \end{ZX}. \begin{ZX} \zxX{\alpha+\beta} \ar[rd,SIS]\\ & \zxX{\alpha+\beta} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha+\beta} \ar[rd,ui,SIS]\\ & \zxX{\alpha+\beta} \end{ZX}. } \end{codeexample} With |^.|: \begin{codeexample}[] {% \def\zxEnableIntersectionsNodes{} \begin{ZX} \zxX{} \ar[rd,^.]\\ & \zxX{} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{} \ar[rd,ui,^.]\\ & \zxX{} \end{ZX}. \begin{ZX} \zxX{\alpha} \ar[rd,^.]\\ & \zxX{} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha} \ar[rd,ui,^.]\\ & \zxX{} \end{ZX}. \begin{ZX} \zxX{\alpha} \ar[rd,^.]\\ & \zxX{\beta} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha} \ar[rd,ui,^.]\\ & \zxX{\beta} \end{ZX}. \begin{ZX} \zxX{\alpha+\beta} \ar[rd,^.]\\ & \zxX{\alpha+\beta} \end{ZX} $\Rightarrow $ ui \begin{ZX} \zxX{\alpha+\beta} \ar[rd,ui,^.]\\ & \zxX{\alpha+\beta} \end{ZX}. } \end{codeexample} Now using our favorite drawing. Here we illustrate how we apply our custom style to all arrows. 
\begin{codeexample}[width=0pt] \def\zxEnableIntersectionsNodes{}% \tikzset{ /zx/user overlay wires/.style={ ui, % Other method } } \begin{ZX}[ execute at begin picture={% %%% Definition of long items (the goal is to have a small and readable matrix % (warning: macro can't have numbers in TeX. Also, make sure not to use existing names) \def\Zpifour{\zxFracZ[a=Zpi4]-{\pi}{4}}% \def\mypitwo{\zxFracX[a=mypi2]{\pi}{2}}% } ] %%% Matrix: in emacs "M-x align" is practical to automatically format it. a is for 'alias' & \zxN[a=n]{} & \zxZ[a=xmiddle]{} & & \zxN[a=out1]{} \\ \zxN[a=in1]{} & \Zpifour{} & \zxX[a=Xdown]{} & \mypitwo{} & \\ & & & & \zxN[a=out2]{} \\ \zxN[a=in2]{} & \zxX[a=X1]{} & \zxZ[a=toprightpi]{\pi} & & \zxN[a=out3]{} %%% Arrows % Column 1 \ar[from=in1,to=X1,s] \ar[from=in2,to=Zpi4,.>] % Column 2 \ar[from=X1,to=xmiddle,N'] \ar[from=X1,to=toprightpi,H] \ar[from=Zpi4,to=n,C] \ar[from=n,to=xmiddle,wc] \ar[from=Zpi4,to=Xdown] % Column 3 \ar[from=xmiddle,to=Xdown,C-] \ar[from=xmiddle,to=mypi2,'>] % Column 4 \ar[from=toprightpi,to=mypi2,-N] \ar[from=mypi2,to=out1,<'] \ar[from=mypi2,to=out2,<.] \ar[edge above,use intersections,from=Xdown,to=out3,<.] \end{ZX} \end{codeexample} The same using |control points visible| to check if the good styles are applied: \begin{codeexample}[width=0pt] \def\zxEnableIntersectionsNodes{}% \tikzset{ /zx/user overlay wires/.style={ ui, % Enable our style on all edge above, % For debugging control points visible % For debugging } } \def\zxDebugMode{} \def\zxControlPointsVisible{} \begin{ZX}[ execute at begin picture={% %%% Definition of long items (the goal is to have a small and readable matrix % (warning: macro can't have numbers in TeX. Also, make sure not to use existing names) \def\Zpifour{\zxFracZ[a=Zpi4]-{\pi}{4}}% \def\mypitwo{\zxFracX[a=mypi2]{\pi}{2}}% } ] %%% Matrix: in emacs "M-x align" is practical to automatically format it. a is for 'alias' & \zxN[a=n]{} & \zxZ[a=xmiddle]{} & & \zxN[a=out1]{} \\ \zxN[a=in1]{} & \Zpifour{} & \zxX[a=Xdown]{} & \mypitwo{} & \\ & & & & \zxN[a=out2]{} \\ \zxN[a=in2]{} & \zxX[a=X1]{} & \zxZ[a=toprightpi]{\pi} & & \zxN[a=out3]{} %%% Arrows % Column 1 \ar[from=in1,to=X1,s] \ar[from=in2,to=Zpi4,.>] % Column 2 \ar[from=X1,to=xmiddle,N'] \ar[from=X1,to=toprightpi,H] \ar[from=Zpi4,to=n,C] \ar[from=n,to=xmiddle,wc] \ar[from=Zpi4,to=Xdown] % Column 3 \ar[from=xmiddle,to=Xdown,C-] \ar[from=xmiddle,to=mypi2,'>] % Column 4 \ar[from=toprightpi,to=mypi2,-N] \ar[from=mypi2,to=out1,<'] \ar[from=mypi2,to=out2,<.] \ar[edge above,use intersections,from=Xdown,to=out3,<.] \end{ZX} \end{codeexample} Now, we can also globally enable the |ui| style and the intersection only for some kinds of arrows. For instance (here we enable it for all styles based on |N|, i.e.\ |*N*| and |>|-like wires). See that the |s| node is note using the intersections mode: \begin{codeexample}[width=0pt] \def\zxEnableIntersectionsNodes{}% \tikzset{ /zx/user overlay wires/.style={ %% Nbase changes both N-like and >-like styles. %% Use N/.append to change only N-like. Nbase/.append style={% ui, % intersection only for arrows based on N (N and <) }, edge above, % For debugging control points visible % For debugging } } \def\zxDebugMode{} \def\zxControlPointsVisible{} \begin{ZX}[ execute at begin picture={% %%% Definition of long items (the goal is to have a small and readable matrix % (warning: macro can't have numbers in TeX. 
Also, make sure not to use existing names) \def\Zpifour{\zxFracZ[a=Zpi4]-{\pi}{4}}% \def\mypitwo{\zxFracX[a=mypi2]{\pi}{2}}% } ] %%% Matrix: in emacs "M-x align" is practical to automatically format it. a is for 'alias' & \zxN[a=n]{} & \zxZ[a=xmiddle]{} & & \zxN[a=out1]{} \\ \zxN[a=in1]{} & \Zpifour{} & \zxX[a=Xdown]{} & \mypitwo{} & \\ & & & & \zxN[a=out2]{} \\ \zxN[a=in2]{} & \zxX[a=X1]{} & \zxZ[a=toprightpi]{\pi} & & \zxN[a=out3]{} %%% Arrows % Column 1 \ar[from=in1,to=X1,s] \ar[from=in2,to=Zpi4,.>] % Column 2 \ar[from=X1,to=xmiddle,N'] \ar[from=X1,to=toprightpi,H] \ar[from=Zpi4,to=n,C] \ar[from=n,to=xmiddle,wc] \ar[from=Zpi4,to=Xdown] % Column 3 \ar[from=xmiddle,to=Xdown,C-] \ar[from=xmiddle,to=mypi2,'>] % Column 4 \ar[from=toprightpi,to=mypi2,-N] \ar[from=mypi2,to=out1,<'] \ar[from=mypi2,to=out2,<.] \ar[edge above,use intersections,from=Xdown,to=out3,<.] \end{ZX} \end{codeexample} \end{pgfmanualentry} \begin{pgfmanualentry} \def\extrakeytext{style, } \extractcommand\zxIntersectionLineBetweenStartEnd\@@ \makeatletter% should not be letter for \@@... strange \extractkey/zx/wires definition/intersections mode between start end\@nil% \extractkey/zx/wires definition/intersections mode bezier controls\@nil% \makeatother \pgfmanualbody Note that we also defined another intersection mechanism, in which the intersection with the node boundary is computed using the line that links the two fake centers of the starting and ending points. To use it, either define |\def\zxIntersectionLineBetweenStartEnd{}| or use the style |intersections mode between start end| (or, to come back to the normal intersection mode, |intersections mode bezier controls|). Note that this just changes the way intersections are computed; it does not enable intersections, so you still need to enable them as explained above (for instance using |use intersections|, or |ui| if you also want to load our style). Note however that we did not spend too much effort on this mode, as the result is often not really appealing, in particular for the |o| shapes, and therefore we designed no special style for it and made only a few tests. \begin{codeexample}[] {% \def\zxEnableIntersectionsNodes{} \begin{ZX} \zxX{\alpha} \ar[rd,N]\\ & \zxX{\beta} \end{ZX} $\Rightarrow $ between start end \begin{ZX} \zxX{\alpha} \ar[edge above,rd,ui,intersections mode between start end,N]\\ & \zxX{\beta} \end{ZX}. \begin{ZX} \zxX{\alpha} \ar[r,o'] \ar[r,o.] & \zxX{\beta} \end{ZX} $\Rightarrow $ between start end \begin{ZX} \zxX{\alpha} \ar[r,ui,intersections mode between start end,o'] \ar[r,ui,intersections mode between start end,o.] & \zxX{\beta} \end{ZX}. } \end{codeexample} \end{pgfmanualentry} \subsection{Nested diagrams} If you consider this example: \begin{codeexample}[] Alone \begin{ZX} \zxX{} \rar \ar[r,o'] \ar[r,o.] & \zxZ{} \end{ZX} % in a diagram % \begin{ZX}[math baseline=myb] &[\zxwCol] \zxFracX-{\pi}{2} & \zxX{} \rar \ar[r,o'] \ar[r,o.] & \zxZ{} &[\zxwCol]\\ \zxN[a=myb]{} \rar & \zxFracZ{\pi}{2} \rar & \zxFracX{\pi}{2} \rar & \zxFracZ{\pi}{2} \rar & \zxN{} \end{ZX} \end{codeexample} You can see that the constant looks much wider in the second picture, because the nodes below increase the column size. One solution I found for this problem is to use |savebox| to create your drawing \emph{before} the diagram, and then use the |fit| library to include it where you want in the matrix (see below for a command that does that automatically): \begin{codeexample}[width=0pt] %% Create a new box \newsavebox{\myZXbox} %% Save our small diagram.
%% Warning: on older versions, you needed to use \& instead of & %% (the char '&' causes troubles in functions), but I fixed it 2022/02/09 \sbox{\myZXbox}{% % add \tikzset{external/optimize=false} if you use tikz "external" library % \zx{ % you may need \zxX{} \rar \ar[r,o'] \ar[r,o.] & \zxZ{} }% } $x = \begin{ZX}[ execute at end picture={ %% Add our initial drawing at the end: \node[fit=(start)(end),yshift=-axis_height] {\usebox{\myZXbox}}; }, math baseline=myb] &[\zxwCol] \zxFracX-{\pi}{2} & \zxN[a=start]{} & \zxN[a=end]\\ \zxN[a=myb]{} \rar & \zxFracZ{\pi}{2} \rar & \zxFracX{\pi}{2} \rar & \zxFracZ{\pi}{2} \rar & \zxN{} \end{ZX}$ \end{codeexample} Note that this has the advantage of preserving the baseline of the big drawing. However, it is a bit cumbersome to use, so we provide a wrapper that produces the above code automatically (this has not been tested extensively, and may be subject to changes): \begin{pgfmanualentry} \def\extrakeytext{style, } \extractcommand\zxSaveDiagram\marg{name with backslash}\opt{\oarg{zx options}}\marg{diagram with ampersand \textbackslash\&}\@@ \makeatletter% should not be letter for \@@... strange \extractkey/zx/wires definition/use diagram=\marg{name with \textbackslash}\marg{(nodes)(to)(fit)}\@nil% \makeatother \pgfmanualbody Use |\zxSaveDiagram| to save and name a diagram (this must be done before the diagram you want to insert it into); also \textbf{do not forget the backslash before the name}. Note that before 2022/02/09 you needed to use |\&| instead of |&|, but this has been fixed since then; in case you really care about backward compatibility, either download the sty file into your folder, or add |ampersand replacement=\&| in the options of the |\zxSaveDiagram| function and use |\&| as before. Then, use |use diagram| to insert it inside a diagram (the second argument is a list of nodes given to the |fit| library): \begin{codeexample}[width=0pt] %% v---- note the backslash \zxSaveDiagram{\myZXconstant}{\zxX{} \rar \ar[r,o'] \ar[r,o.] & \zxZ{}} $x = \begin{ZX}[use diagram={\myZXconstant}{(start)(end)}, math baseline=myb] &[\zxwCol] \zxFracX-{\pi}{2} & \zxN[a=start]{} & \zxN[a=end]\\ \zxN[a=myb]{} \rar & \zxFracZ{\pi}{2} \rar & \zxFracX{\pi}{2} \rar & \zxFracZ{\pi}{2} \rar & \zxN{} \end{ZX}$ \end{codeexample} Note that if you need more space to insert the drawing, you can use |\zxN+[a=start,minimum width=2cm]{}| instead of |\zxN|. \end{pgfmanualentry} Sometimes, you may also find it useful to stack diagrams (for instance because the matrix of the first diagram is not related to the matrix of the second diagram), or to position multiple diagrams relative to each other. You can use arrays to do that, but it will not preserve the baseline. Another solution is to put the |ZX| environment inside nodes contained in a |tikzpicture| (I know that tikz does not like nesting\dots{} but it works nicely for what I tried. I will also try to provide a helper function to do that later).
For example here we use it to add the constants below the second diagram: \begin{codeexample}[vbox] \begin{equation*} \begin{ZX} &[\zxwCol] & &[\zxwCol] \zxN{} \ar[dd,3 vdots] \\[\zxNCol] \zxN{} \rar & \zxZ{\pi} \rar & \zxX{\alpha} \ar[ru,N'-] \ar[rd,N.-] & \\[\zxNCol] & & & \zxN{} \end{ZX} = \begin{tikzpicture}[ baseline=(A.base) ] \node[inner sep=0pt](A){% \begin{ZX} &[\zxwCol] & \zxZ{\pi} \ar[dd,3 vdots] \rar &[\zxwCol] \zxN{} \\[\zxNCol] \zxN{} \rar & \zxX-{\alpha} \ar[ru,N'-] \ar[rd,N.-] & \\[\zxNCol] & & \zxZ{\pi} \rar & \zxN{} \end{ZX} }; \node[inner sep=0pt,below=\zxDefaultColumnSep of A](B){ \begin{ZX} \zxOneOverSqrtTwo{} & \zxX{\alpha} \rar & \zxZ{\pi} \end{ZX} }; \end{tikzpicture} \end{equation*} \end{codeexample} \subsection{Further customization} You can further customize your drawings using any functionality from \tikzname{} and |tikz-cd| (but it is of course at your own risk). For instance, we can change the separation between rows and/or columns for a whole picture (but prefer to use |zx row sep| as it also updates pre-configured column spaces): \begin{codeexample}[width=0pt] \begin{ZX}[row sep=1mm] & & & & \zxZ{\pi} \\ \zxN{} \rar & \zxX{} \ar[rd,(.] \ar[urrr,(',H] & & & & \zxN{} \\ & & \zxZ{} \ar[rd,s.] \rar & \zxFracX{\pi}{2} \ar[uur,('] \ar[rru,<'] \ar[rr] & & \zxN{} \\ \zxN{} \rar & \zxFracZ-{\pi}{4} \ar[ru,('] \ar[rr,o.] & & \zxX{} \ar[rr] & & \zxN{} \end{ZX} \end{codeexample} Or we can define our own style to create blocks: {\catcode`\|=12 % Ensures | is not anymore \verb|...| \begin{codeexample}[width=0pt] { % \usetikzlibrary{shadows} \tikzset{ my bloc/.style={ anchor=center, inner sep=2pt, inner xsep=.7em, minimum height=3em, draw, thick, fill=blue!10!white, double copy shadow={opacity=.5},tape, } } \zx{|[my bloc]| f \rar &[1mm] |[my bloc]| g \rar &[1mm] \zxZ{\alpha} \rar & \zxNone{}} } \end{codeexample} } We can also use for instance |fit|, |alias|, |execute at end picture| and layers (the user can use |background| for things behind the drawing, |box| for special nodes above the drawings (like multi-column nodes, see below), and |foreground| which is even higher) to do something like that: {\catcode`\|=12 % Ensures | is not anymore \verb|...| \begin{codeexample}[width=3cm] % \usetikzlibrary{fit} \begin{ZX}[ execute at end picture={ \node[ inner sep=2pt, node on layer=background, %% Ensure the node is behind the drawing rounded corners, draw=blue, dashed, fill=blue!50!white, opacity=.5, fit=(cnot1)(cnot2), %% position of the node, thanks fit library "\textsc{cnot}" {above, inner sep=1mm} %% Adds label, thanks quote library. Not sure why, inner sep is set to 0 when using tikz "external" library. ] {}; } ] \zxNone{} \rar & \zxZ[alias=cnot1]{} \dar \rar & \zxNone{}\\ \zxNone{} \rar & \zxX[alias=cnot2]{} \rar & \zxNone{}\\ \end{ZX} \end{codeexample} } \section{Future works} There is surely many things to improve in this library, and given how young it is there is surely many undiscovered bugs. So feel free to propose ideas or report bugs \mylink{https://github.com/leo-colisson/zx-calculus/issues}{one the github page}. The intersections code is also quite slow, so I would be curious to check if I can optimize it (the first goal was to make it work). I should also work on the compatibility with tikzit (basically just write tikz configuration files that you can just use and document how to use tikzit with it), or even write a dedicated graphical tool (why not based on tikzit itself, or \mylink{https://tikzcd.yichuanshen.de/}{this tool}). 
I also want to find a nicer way to merge cells (for now I propose to use the |fit| library, but it's not very robust against overlays) and to nest ZX diagrams. And of course fix typos in the documentation and write other styles, including notations not specific to ZX-calculus. Feel free to submit Pull Requests to solve that, or to submit bug reports to explain uncovered use-cases! \section{Acknowledgement} I'm of course very grateful to everybody who worked on this amazing field, which made diagrammatic quantum computing possible, and to the many StackExchange users who helped me understand \LaTeX{} and \tikzname{} a bit better (sorry, I don't want to risk an incomplete list, but many thanks to egreg, David Carlisle, cfr, percusse, Andrew Stacey, Henri Menke, SebGlav\dots{}). I also thank Robert Booth for making me realize how bad my old style was, and for giving advice on how to improve it. Thanks to John van de Wetering, whose style has also been a source of inspiration. \section{Changelog} \begin{itemize} \item 2022/02/09: \begin{itemize} \item Added compatibility with the external tikz library \item More robust handling of |&|: align and macros do not need |\&| anymore. \item |\&| in |\zxSaveDiagram| is replaced with |&|. This introduces a small backward incompatibility, but hey, I said it was still subject to changes :-) \end{itemize} \end{itemize} \printindex \end{document}
{ "alphanum_fraction": 0.6595737414, "avg_line_length": 52.0960854093, "ext": "tex", "hexsha": "d3a4eb4f02e2789a0c3d476293b303513874316b", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "5a6eb8c723951d27313b642a19ffd9f703eb720b", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "leo-colisson/zx-calculus", "max_forks_repo_path": "doc/zx-calculus.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "5a6eb8c723951d27313b642a19ffd9f703eb720b", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "leo-colisson/zx-calculus", "max_issues_repo_path": "doc/zx-calculus.tex", "max_line_length": 1515, "max_stars_count": null, "max_stars_repo_head_hexsha": "5a6eb8c723951d27313b642a19ffd9f703eb720b", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "leo-colisson/zx-calculus", "max_stars_repo_path": "doc/zx-calculus.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 46119, "size": 146390 }
\addchap{Preface} \begin{refsection} Head-driven Phrase Structure Grammar (HPSG) is a declarative (or, as is often said, constraint-based) monostratal approach to grammar which dates back to early 1985, when Carl Pollard presented his Lectures on HPSG. It was developed initially in joint work by Pollard and Ivan Sag, but many other people have made important contributions to its development over the decades. It provides a framework for the formulation and implementation of natural language grammars which are (i) linguistically motivated, (ii) formally explicit, and (iii) computationally tractable. From the very beginning it has involved both theoretical and computational work seeking both to address the theoretical concerns of linguists and the practical issues involved in building a useful natural language processing system. HPSG is an eclectic framework which has drawn ideas from the earlier Generalized Phrase Structure Grammar (GPSG, \citealp{GKPS85a}), Categorial Grammar \citep{Ajdukiewicz35a-u}, and Lexical"=Functional Grammar (LFG, \citealp{Bresnan82a-ed}), among others. It has naturally evolved over the decades. Thus, the construction"=based version of HPSG, which emerged in the mid-1990s \citep{Sag97a,GSag2000a-u}, differs from earlier work \citep{ps,ps2} in employing complex hierarchies of phrase types or constructions. Similarly, the more recent Sign-Based Construction Grammar approach differs from earlier versions of HPSG in making a distinction between signs and constructions and using it to make a number of simplifications \citep{Sag2012a}. Over the years, there have been groups of HPSG researchers in many locations engaged in both descriptive and theoretical work and often in building HPSG-based computational systems. There have also been various research and teaching networks, and an annual conference since 1993. The result of this work is a rich and varied body of research focusing on a variety of languages and offering a variety of insights. The present volume seeks to provide a picture of where HPSG is today. It begins with a number of introductory chapters dealing with various general issues. These are followed by chapters outlining HPSG ideas about some of the most important syntactic phenomena. Next are a series of chapters on other levels of description, and then chapters on other areas of linguistics. A final group of chapters considers the relation between HPSG and other theoretical frameworks. It should be noted that for various reasons not all areas of HPSG research are covered in the handbook (e.g., phonology). So, the fact that a particular topic is not addressed in the handbook should not be interpreted as an absence of research on the topic. Readers interested in such topics can refer to the HPSG online bibliography maintained at the Humboldt Universität zu Berlin.\footnote{% \url{https://hpsg.hu-berlin.de/HPSG-Bib/}, 2021-04-29. } All chapters were reviewed by one author and at least one of the editors. All chapters were reviewed by Stefan Müller. Jean-Pierre Koenig and Stefan Müller did a final round of reading all papers and checked for consistency and cross-linking between the chapters. \section*{Open access} Many authors of this handbook have previously been involved in several other handbook projects (some that cover various aspects of HPSG), and by now there are at least five handbook articles on HPSG available. 
But the editors felt that writing one authoritative resource describing the framework and being available free of charge to everybody was an important service to the linguistic community. We hence decided to publish the book open access with Language Science Press. % militant version starts here: =:-) %% The authors of this handbook were involved in many, many other handbook projects before. By now %% there are at least five handbook articles on HPSG available. %% % Detmar Bob Levine %% % Stefan (HSK) %% % Stefan (Artenvielfalt) %% % Stefan & Felix %% % Stefan & Antonio %% % Adam Przepiórkowski and Anna Kupść in journal %% The editors felt that writing these handbook articles for commercial publishers who will hide them %% behind paywalls is a waste of time. Established researchers do not need further handbook articles %% that people cannot read. What is needed instead is one authoritative resource describing the framework %% and being available free of charge to everybody. We hence decided to publish the book open access %% with Language Science Press. \section*{Open source} Since the book is freely available and no commercial interests stand in the way of openness, the \LaTeX\ source code of the book can be made available as well. We put all relevant files on GitHub,\footnote{ \url{https://www.github.com/langsci/\lsID}, 2021-04-29. } and we hope that they may serve as a role model for future publications of HPSG papers. Additionally, every single item in the bibliographies was checked by hand either by Stefan Müller or by one of his student assistants. We checked authors and editors; made sure first name information was complete; corrected page numbers; removed duplicate entries; added DOIs and URLs where appropriate; and added series and number information as applicable for books, book chapters, and journal issues. The result is a resource containing 2623 bibliography entries. These can be downloaded as a single readable PDF file or as \textsc{Bib}\TeX{} file from \url{https://github.com/langsci/hpsg-handbook-bib}. \section*{\acknowledgmentsUS} We thank all the authors for their great contributions to the book, and for reviewing chapters and chapter outlines of the other authors. We thank Frank Richter, Bob Levine, and Roland Schäfer for discussion of points related to the handbook, and Elizabeth Pankratz for extremely careful proofreading and help with typesetting issues. We also thank Elisabeth Eberle and Luisa Kalvelage for doing bibliographies and typesetting trees of several chapters and for converting a complicated chapter from Word into \LaTeX. We thank Sebastian Nordhoff and Felix Kopecky for constant support regarding \LaTeX{} issues, both for the book project overall and for individual authors. Felix implemented a new \LaTeX{} class for typesetting AVMs, \texttt{langsci-avm}, which was used for typesetting this book. It is compatible with more modern font management systems and with the \texttt{forest} package, which is used for most of the trees in this book. We thank Sašo Živanović for writing and maintaining the \texttt{forest} package and for help specifying particular styles with very advanced features. His package turned typesetting trees from a nightmare into pure fun! To make the handling of this large book possible, Stefan Müller asked Sašo for help with externalization of \texttt{forest} trees, which led to the development of the \texttt{memoize} package. 
The HPSG handbook and other book projects by Stefan were an ideal testing ground for externalization of \texttt{tikz} pictures. Stefan wants to thank Sašo for the intense collaboration that led to a package of great value for everybody living in the woods. \section*{Abbreviations and feature names used in the book} \begin{longtable}{@{}p{3cm}p{9cm}@{}} \feat{1st-pc} & first position class \\ \feat{accent} & accent \\ \feat{act(or)} & actor argument \\ \feat{addressee} & index for addressee \\ \feat{aff} & affixes \\ \feat{agr} & agreement \\ \feat{anaph} & anaphora \\ \feat{ancs} & anchors \\ \feat{antec} & antecedent referent markers \\ \feat{arg} & semantic argument of a relation \\ \feat{arg-st} & argument Structure \\ \feat{aux} & auxiliary verb (or not) \\ \feat{background} (\feat{backgr}) & background assumptions \\ \feat{bd} & boundary tone \\ \feat{bg} & background (in information structure) \\ \feat{body} & body (nuclear scope) of quantifier \\ \feat{case} & case \\ \feat{category} & syntactic category information \\ \feat{c-indices} (\feat{c-inds}) & contextual indices \\ \feat{cl} & inflectional class \\ \feat{clitic} (\feat{clts}) & clitics \\ \feat{conds} & predicative conditions \\ \feat{cluster} & cluster of phrases \\ \feat{coll} & collocation type \\ \feat{comps} & complements \\ \feat{concord} & concord information \\ \feat{content} (\feat{cont}) & lexical semantic content \\ \feat{context} (\feat{ctxt}) & contextual information \\ \feat{coord} & coordinator \\ \feat{correl} & correlative marker \\ \feat{det} & semantic determiner (a.k.a. quantifier force) \\ \feat{dsl} & double slash \\ \feat{deps} & dependents \\ \feat{dom} & order Domain \\ \feat{dr} & discourse referent \\ \feat{dte} & designated terminal element \\ \feat{dtrs} & daughters \\ \feat{econt} & external content \\ \feat{embed} & embedded (or not) \\ \feat{ending} & inflectional ending \\ \feat{exp} & experiencer \\ \feat{excont} (\feat{exc}) & external content (in LRS) \\ \feat{extra} & extraposed syntactic argument \\ \feat{fc} & focus-marked lexical item \\ \feat{fcompl} & functional complement \\ \feat{fig} & figure in a locative relation \\ \feat{first} & first member of a list \\ \feat{focus} & focus \\ \feat{form} & form of a lexeme \\ \feat{fpp} & focus projection potential \\ \feat{gend} & gender \\ \feat{given} & given information \\ \feat{grnd} & ground in a locative relation \\ \feat{ground} & ground (in information structure) \\ \feat{gtop} & global top \\ \feat{harg} & hole argument of handle constraints \\ \feat{hcons} & handle constraints (to establish relative scope in MRS) \\ \feat{head} (\feat{hd}) & head features\\ \feat{hd-dtr} & head-daughter \\ \feat{hook} & hook (relevant for scope relations in MRS) \\ \feat{ic} & inverted clause (or not) \\ \feat{icons} & individual constraints \\ \feat{icont} & internal content \\ \feat{i-form} & inflected form \\ \feat{index} (\feat{ind}) & semantic index \\ \feat{incont} (\feat{inc}) & internal content (in LRS) \\ \feat{infl} & inflectional features \\ \feat{info-struc} & information structure \\ \feat{inher} & inherited non-local features \\ \feat{inst} & instance (argument of an object category) \\ \feat{inv} & inverted verb (or not) \\ \feat{ip} & intonational phrase \\ \feat{key} & key semantic relation \\ \feat{lagr} & left conjunct agreement \\ \feat{larg} & label argument of handle constraints \\ \feat{lbl} & label of elementary predications \\ \feat{lex-dtr} & lexical daughter \\ \feat{lexeme} & lexeme identifier \\ \feat{lf} & logical 
form \\ \feat{lid} & lexical identifier \\ \feat{light} & light expressions (or not) \\ \feat{link} & link (in information structure) \\ \feat{listeme} & lexical identifier \\ \feat{liszt} & list of semantic relations \\ \feat{local} & syntactic and semantic information relevant in local contexts \\ \feat{l-periph} & left periphery \\ \feat{ltop} & local top \\ \feat{major} & major part of speech features \\ \feat{major} & major or minor part of speech \\ \feat{main} & main semantic contribution of a lexeme \\ \feat{marking} (\feat{mrkg}) & marking \\ \feat{max-qud} & maximal question under discussion \\ \feat{mc} & main clause (or not) \\ \feat{$\mu$-feat} & morphological features \\ \feat{minor} & minor part of speech features \\ \feat{mkg} & information structure properties (marking) of lexical items \\ \feat{mod} & modified expression \\ \feat{modal-base} & modal modification of situation core \\ %\feat{mtr} & Mother \\ \feat{mood} & mood \\ \feat{morph} & morphology \\ \feat{morph-b} & morphological base \\ \feat{mp} & morphophonology \\ \feat{mph} & morphs \\ \feat{ms} & morphosyntactic (or morphosemantic) property set \\ \feat{mud} & morph under discussion \\ \feat{n} & nominal part of speech \\ \feat{neg} & negative expression \\ \feat{non-head-dtrs} (\feat{nh-dtrs}) & non-head daughters \\ \feat{nonlocal} & syntactic and semantic information relevant for non-local dependencies \\ \feat{nucl} & nucleus of a state of affairs \\ \feat{numb} & number \\ \feat{params} & parameters (restricted variables) \\ \feat{pa} & pitch accent \\ \feat{parts} & list of meaningful expressions \\ \feat{pers} & person \\ \feat{pc} & position class \\ \feat{pform} & preposition form \\ \feat{phon} (\feat{ph}) & phonology \\ \feat{phon-string} & phonological string \\ \feat{php} & phonological phrase \\ \feat{pol} & polarity \\ \feat{pool} & pool of quantifiers to be retrieved \\ \feat{prd} & predicative (or not) \\ \feat{pred} & predicate \\ \feat{pref} & prefixes \\ \feat{pre-modifier} & modifiers before the modified (or not) \\ \feat{prop} & proposition \\ \feat{quants} & list of quantifiers \\ \feat{qstore} & quantifier store \\ \feat{qud} & question under discussion \\ \feat{ques} & question \\ %Not sure what this does, p.396 \feat{ragr} & right conjunct agreement \\ \feat{realized} & realized syntactic argument \\ \feat{rel} & indices for relatives \\ \feat{rln} (\feat{rel}) & semantic relation \\ \feat{rels} & list or set of semantic relations \\ \feat{rest} & non-first members of a list \\ \feat{restr} & restriction of quantifier (in MRS) \\ \feat{restrictions} (\feat{restr}) & restrictions on index \\ \feat{retrieved} & retrieved quantifiers \\ \feat{r-mark} & reference marker \\ \feat{root} & root clause or not \\ \feat{rr} & realizational Rules \\ \feat{sal-utt} & salient Utterance \\ \feat{select} (\feat{sel}) & selected expression \\ \feat{sit} & situation \\ \feat{sit-core} & situation core \\ \feat{slash} & set of locally unrealized arguments \\ \feat{soa} (\feat{soa-arg}) & state Of Affairs \\ \feat{speaker} & index for the Speaker \\ \feat{spec} & specified \\ \feat{spr} & specifier \\ \feat{status} & information structure status \\ \feat{stem} & stem phonology \\ \feat{stm-pc} & stem position class \\ \feat{store} & same as \feat{q-store} \\ %Check GS \feat{struc-meaning} & structured meaning \\ \feat{subj-agr} & subject agreement \\ \feat{subcat} & subcategorization \\ \feat{synsem} & syntax/ Semantics features \\ \feat{subj} & subject \\ \feat{tail} & tail (in information structure) 
\\ \feat{tam} & tense, aspect, mood \\ \feat{tns} & tense \\ \feat{topic} & topic \\ \feat{tp} & topic-marked lexical item \\ \feat{und} & undergoer argument \\ \feat{ut} & phonological utterance \\ \feat{v} & verbal part of speech \\ \feat{val} & valence \\ \feat{var} & variable (bound by a quantifier) \\ \feat{vform} & verb form \\ \feat{weight} & expression weight \\ \feat{wh} & \emph{wh}-expression (for questions) \\ \feat{xarg} & extra-argument \\ \end{longtable} \printbibliography[heading=subbibliography] \end{refsection} % <!-- Local IspellDict: en_US-w_accents -->
{ "alphanum_fraction": 0.7435844655, "avg_line_length": 50.7804878049, "ext": "tex", "hexsha": "dd95f8091d3373d250072497b9df80af290d09e6", "lang": "TeX", "max_forks_count": 9, "max_forks_repo_forks_event_max_datetime": "2021-01-14T10:35:42.000Z", "max_forks_repo_forks_event_min_datetime": "2018-03-20T20:05:04.000Z", "max_forks_repo_head_hexsha": "2b7aa5d3301f5b18061c9f76af311e3bebccca9a", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "langsci/259", "max_forks_repo_path": "chapters/preface.tex", "max_issues_count": 177, "max_issues_repo_head_hexsha": "2b7aa5d3301f5b18061c9f76af311e3bebccca9a", "max_issues_repo_issues_event_max_datetime": "2022-03-29T15:28:50.000Z", "max_issues_repo_issues_event_min_datetime": "2021-01-29T10:48:51.000Z", "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "langsci/259", "max_issues_repo_path": "chapters/preface.tex", "max_line_length": 475, "max_stars_count": 16, "max_stars_repo_head_hexsha": "50bbaaf0bd7b3f8779d4e7d3685bd4a0020dfa12", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "langsci/hpsg-handbook", "max_stars_repo_path": "chapters/preface.tex", "max_stars_repo_stars_event_max_datetime": "2021-01-05T11:42:54.000Z", "max_stars_repo_stars_event_min_datetime": "2018-03-20T08:06:05.000Z", "num_tokens": 3767, "size": 14574 }
\section{Library of transformations} \begin{frame}{The library} The library of transformations contains small transformation steps that are proven correct. \begin{itemize} \item Each transformation in here can be used as building block. \item The library of transformations does not achieve complete coverage. \end{itemize} \end{frame} \note{ \begin{itemize} \item Explain what the library of transformations is. \item Explain that the library included within this work is incomplete, e.g. not all possible models can be build from this library. \item Explain in context what the library of transformations is. \end{itemize} } \begin{frame}{In context} \begin{columns}[c] \begin{column}{0.05\textwidth} \end{column}\begin{column}{0.3\textwidth} \centering \includegraphics[width=0.7\textwidth]{images/03_transformation_framework/lego_house_roofless.png} \end{column}\begin{column}{0.05\textwidth} \centering + \end{column}\begin{column}{0.2\textwidth} \centering \includegraphics[width=0.7\textwidth]{images/03_transformation_framework/lego_roof_pieces.png} \end{column}\begin{column}{0.05\textwidth} \centering = \end{column}\begin{column}{0.3\textwidth} \centering \includegraphics[width=0.7\textwidth]{images/03_transformation_framework/lego_house_roof_step.png} \end{column} \end{columns} \begin{columns}[c] \begin{column}{0.05\textwidth} \end{column}\begin{column}{0.3\textwidth} \centering \rotatebox{90}{$\leftrightarrow$} \end{column}\begin{column}{0.05\textwidth} \centering $\sqcup$ \end{column}\begin{column}{0.2\textwidth} \centering \rotatebox{90}{$\leftrightarrow$} \end{column}\begin{column}{0.05\textwidth} \centering = \end{column}\begin{column}{0.3\textwidth} \centering \rotatebox{90}{$\leftrightarrow$} \end{column} \end{columns} \begin{columns}[c] \begin{column}{0.05\textwidth} \end{column}\begin{column}{0.3\textwidth} \centering \includegraphics[width=0.7\textwidth]{images/03_transformation_framework/duplo_house_roofless.png} \end{column}\begin{column}{0.05\textwidth} \centering + \end{column}\begin{column}{0.2\textwidth} \centering \includegraphics[width=0.7\textwidth]{images/03_transformation_framework/duplo_roof_pieces.png} \end{column}\begin{column}{0.05\textwidth} \centering = \end{column}\begin{column}{0.3\textwidth} \centering \includegraphics[width=0.7\textwidth]{images/03_transformation_framework/duplo_house_roof_step.png} \end{column} \end{columns} \end{frame} \begin{frame}{Adding a regular class with instances} Add a class named \textit{Example} to a model. On the instance level, introduce a set of objects with this type. \begin{columns}[c] \begin{column}{0.05\textwidth} \end{column}\begin{column}{0.45\textwidth} \centering \includegraphics[width=0.5\textwidth]{images/04_library_of_transformations/class_type.pdf} \end{column}\begin{column}{0.05\textwidth} \centering $\leftrightarrow$ \end{column}\begin{column}{0.45\textwidth} \centering \input{images/04_library_of_transformations/class_as_node_type.tikz} \end{column} \end{columns} \begin{columns}[c] \begin{column}{0.05\textwidth} \end{column}\begin{column}{0.45\textwidth} \centering \includegraphics[width=0.5\textwidth]{images/04_library_of_transformations/class_instance.pdf} \end{column}\begin{column}{0.05\textwidth} \centering $\leftrightarrow$ \end{column}\begin{column}{0.45\textwidth} \centering \input{images/04_library_of_transformations/class_as_node_type_instance.tikz} \end{column} \end{columns} \end{frame} \note{ \begin{itemize} \item Shortly explain what a transformation within the library looks like. 
It has a type level transformation with corresponding instance level transformations. \item Very quickly explain the model and how to apply it. \end{itemize} } \begin{frame}{Adding a string field} Add a field, typed by a string, named \textit{field} to an existing class named \textit{Example}. For all existing objects typed by \textit{Example}, introduce a new value for the field \textit{field}. \begin{columns}[c] \begin{column}{0.05\textwidth} \end{column}\begin{column}{0.45\textwidth} \centering \includegraphics[width=0.7\textwidth]{images/04_library_of_transformations/data_field.pdf} \end{column}\begin{column}{0.05\textwidth} \centering $\leftrightarrow$ \end{column}\begin{column}{0.45\textwidth} \centering \input{images/04_library_of_transformations/data_field_as_edge_type.tikz} \end{column} \end{columns} \begin{columns}[c] \begin{column}{0.05\textwidth} \end{column}\begin{column}{0.45\textwidth} \centering \includegraphics[width=0.7\textwidth]{images/04_library_of_transformations/data_field_value.pdf} \end{column}\begin{column}{0.05\textwidth} \centering $\leftrightarrow$ \end{column}\begin{column}{0.45\textwidth} \centering \input{images/04_library_of_transformations/data_field_as_edge_type_value.tikz} \end{column} \end{columns} \end{frame} \note{ \begin{itemize} \item Explain the addition of a string field as last example. \end{itemize} }
{ "alphanum_fraction": 0.6685082873, "avg_line_length": 38.6133333333, "ext": "tex", "hexsha": "3f5315f88a7c094f33143c6bd5bbab32fda5dfec", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "a0e860c4b60deb2f3798ae2ffc09f18a98cf42ca", "max_forks_repo_licenses": [ "AFL-3.0" ], "max_forks_repo_name": "RemcodM/thesis-ecore-groove-formalisation", "max_forks_repo_path": "presentation/tex/04_library_of_transformations.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "a0e860c4b60deb2f3798ae2ffc09f18a98cf42ca", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "AFL-3.0" ], "max_issues_repo_name": "RemcodM/thesis-ecore-groove-formalisation", "max_issues_repo_path": "presentation/tex/04_library_of_transformations.tex", "max_line_length": 201, "max_stars_count": null, "max_stars_repo_head_hexsha": "a0e860c4b60deb2f3798ae2ffc09f18a98cf42ca", "max_stars_repo_licenses": [ "AFL-3.0" ], "max_stars_repo_name": "RemcodM/thesis-ecore-groove-formalisation", "max_stars_repo_path": "presentation/tex/04_library_of_transformations.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1596, "size": 5792 }
%!TEX root = ../thesis.tex %******************************************************************************* %****************************** Third Chapter ********************************** %******************************************************************************* \chapter{Faser Experiment} % **************************** Define Graphics Path ************************** \ifpdf \graphicspath{{ChapterFaser/Figs/Raster/}{Chapter3/Figs/PDF/}{Chapter3/Figs/}} \else \graphicspath{{ChapterFaser/Figs/Vector/}{Chapter3/Figs/}} \fi \section{Detector location} \nomenclature[z-FASER]{FASER}{ForwArd Search ExpeRiment} % first letter Z is for Acronyms \nomenclature[z-TAS]{TAS}{Target Absorbers} \nomenclature[z-TAN]{TAN}{Target Absorber Neutral} \nomenclature[z-LOS]{LOS}{Line Of Sight} The FASER collaboration discovered a disused tunnel, called TI12 (Fig. \ref{fig:infrastructure}), in just the right location to intercept these new particles that could be escaping from collisions in the ATLAS detector. The idea is that these light and weakly-interacting particles produced at the ATLAS IP will travel along the beam collision axis, unaffected by the magnets that bend the beams of particles around the ring of the LHC, through matter (mostly rock and concrete) without interacting, and then interact within the FASER detector in TI12. \cite{faser_collaboration_faser:_2019} The schedule is tight. The LHC is warmed up for maintenance until the end of 2020, so FASER needs to be built before Run 3. A third long shutdown is planned for 2024, during which the collaboration could do some maintenance of FASER or install FASER 2, a bigger version of FASER. Manufacturing issues with the magnets, caused by the coronavirus outbreak in China, have already impacted the schedule. The magnetic blocks needed for the FASER magnets will be delivered with a 2-month delay. That puts the delivery of the 1st short magnet at the end of April and of the 2nd at the end of May. The long magnet won't be available before the end of June. This is an issue because the two short magnets form the support of the tracking detector, so it is impossible to install any part of the detector until they arrive. This allows more time for the commissioning of the full detector on the surface (in building ENH1). However, this means the installation will be done after the cool-down period in mid-July and must be completed before the end of October, when the LHC is handed over to LHC operations and access will be very limited. Between these dates, the tunnel will be inaccessible for 2-3 weeks while some electrical tests are performed. In Fig. \ref{fig:infrastructure} we can see that charged particles are deflected by the LHC magnets and neutral hadrons are absorbed by either the TAS or TAN. These are target absorbers designed to protect the magnets. This means that only LLPs will travel from the IP to FASER through 10 meters of concrete and 90 meters of rock. In the SM only muons and neutrinos can reach FASER. However, the collaboration expects LLPs to easily pass through all of the material without interacting and to decay in FASER. We also see in the bottom right corner of Fig. \ref{fig:infrastructure} that FASER will be located roughly 5 meters from the LHC tunnel. \begin{figure}[htbp!] \centering \includegraphics[width=1.0\textwidth]{FASERinfrastructureTI12.png} \caption[TI12 infrastructure]{TI12's infrastructure} \label{fig:infrastructure} \end{figure} \subsubsection{FLUKA Simulations} Due to the bending induced by the LHC's magnets, the muon flux on the LOS will be reduced.
FLUKA simulations concluded that the $\mu_{-}$ flux will be bent to the left of FASER and the $\mu_{+}$ flux to the right, see Fig. \ref{fig:FLUKA}. These predictions were confirmed using emulsion detectors in 2018. \begin{figure}[htbp!] \centering \includegraphics[width=0.8\textwidth]{FLUKA.png} \caption[FLUKA]{FLUKA simulation} \label{fig:FLUKA} \end{figure} \subsection{Civil Engineering work} Some civil engineering work is needed. In Figs. \ref{fig:TunnelBefore} and \ref{fig:TunnelAfter} we see old ventilation conduits that have been removed. Furthermore, since TI12 has a slight upwards slope, a trench needs to be cut out to align FASER with the beam collision axis, see Figs. \ref{fig:CEwork} and \ref{fig:CEwork1}. The digging depth will be around 50 centimeters. \begin{figure}[htbp!] \centering \begin{minipage}{.5\textwidth} \centering \includegraphics[width=.9\linewidth]{TunnelBefore} \caption[Tunnel Before]{Before} \label{fig:TunnelBefore} \end{minipage}% \begin{minipage}{.5\textwidth} \centering \includegraphics[width=.85\linewidth]{TunnelAfter} \caption[Tunnel After]{After} \label{fig:TunnelAfter} \end{minipage} \end{figure} \begin{figure}[htbp!] \centering \includegraphics[width=1.0\textwidth]{ChapterFaser/Figs/Raster/CEworks.JPG} \caption[CE work]{Civil engineering work} \label{fig:CEwork} \end{figure} \begin{figure}[htbp!] \centering \includegraphics[width=1.0\textwidth]{ChapterFaser/Figs/Raster/CEworks1.JPG} \caption[CE work 1]{Civil engineering work} \label{fig:CEwork1} \end{figure} \section{Magnets} and here I write more \dots \section{Instruments - etc}
{ "alphanum_fraction": 0.7353389185, "avg_line_length": 59.6818181818, "ext": "tex", "hexsha": "95a1b657ae578ba9179195ec3054636c76e2500e", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "0045117c0291bc6328b96b5917553ccd1a5d9245", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "eliottlerouge/Master-Thesis-on-FASER---Eliott-JOHNSON", "max_forks_repo_path": "ChapterFaser/ChapterFaser.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "0045117c0291bc6328b96b5917553ccd1a5d9245", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "eliottlerouge/Master-Thesis-on-FASER---Eliott-JOHNSON", "max_issues_repo_path": "ChapterFaser/ChapterFaser.tex", "max_line_length": 922, "max_stars_count": null, "max_stars_repo_head_hexsha": "0045117c0291bc6328b96b5917553ccd1a5d9245", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "eliottlerouge/Master-Thesis-on-FASER---Eliott-JOHNSON", "max_stars_repo_path": "ChapterFaser/ChapterFaser.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1331, "size": 5252 }
\paragraph{Summary:} \textit{Simulations based on the peridynamic theory are a promising approach to understanding the processes involved in matrix failure inside fibre-reinforced plastics. Before such complex simulations are carried out, both the material behavior of the bulk resin and the influence of numerical parameters have to be investigated. In the present text, the linear elastic part of the material response is used to examine the convergence behavior of peridynamic simulations. Possibilities to minimize the effect of different discretization schemes are explored by means of a stochastic material distribution, in correlation with the scatter found in the material tests regarding the elastic material response and failure patterns. This procedure may also be used to investigate the nature of failure initiation and the robustness of the solution.}
\title{Self Organizing Systems Exercise 2}
\author{
Alexander Dobler 01631858\\
Thomas Kaufmann 01129115
}
\date{\today}

\documentclass[12pt]{article}
\usepackage{hyperref}
\usepackage{booktabs}
\usepackage{graphics}
\usepackage{multirow}
\usepackage{graphicx}
\usepackage{subcaption}
\usepackage{mwe}
\usepackage{amsmath,amssymb}
\usepackage[section]{placeins}

\begin{document}
\maketitle

\begin{abstract}
In the second exercise of the lecture Self Organizing Systems we implement a simple Particle Swarm Optimization (PSO) algorithm and experiment with different parameters, fitness functions, constraints and constraint handling methods. More specifically, we are given a PSO framework in NetLogo for optimizing functions $f$ from $\mathbb{R}^2$ to $\mathbb{R}$. Our task is to implement 3 different fitness functions, 3 different constraints, and constraint handling methods using penalization or solution rejection. Furthermore, we conduct several experiments to observe the effects of different parameters on the convergence behaviour of the PSO algorithm. Here we inspect \textit{population size}, \textit{particle speed limit}, \textit{particle inertia}, both \textit{acceleration coefficients} and the difference between constraint handling using penalization and rejection.
\end{abstract}

\section{Implementation}
In this section we describe how we implemented the required tasks given in the exercise. We divide this section into explanations for \textit{constraints}, \textit{fitness functions} and \textit{constraint handling with penalization}.

\subsection{Constraints}
As we are already given skeletons for constraints, implementing them is as easy as returning \textit{true} if the constraint is violated and \textit{false} otherwise. We opted for implementing the following constraints.
\begin{enumerate}
    \item $x^2+y^2<6000$
    \item $x^2+y^2<9000\text{ and }x^2+y^2>4000$
    \item $\tan(2x)<\tan(4y)$
\end{enumerate}
So if, for example, for the first constraint it holds that $x^2+y^2\ge 6000$, the constraint is violated and we return true. We selected these constraints because we wanted constraints with one connected feasible region (constraints 1 and 2) as well as a constraint with multiple separated regions (constraint 3).

\subsection{Fitness Functions}
Here we have a similar setting as for the constraints, because we already have skeletons for the fitness functions. We opted to implement the following functions.
\begin{enumerate}
    \item Schaffer function
    \item Booth's function
    \item Schwefel function
\end{enumerate}
Scaling the $x$ and $y$ variables for the input of the functions is done as already shown in the template. It is also important to mention that the NetLogo $\sin$-function expects angles in degrees as input, so we had to convert radians to degrees first. For the Schwefel function we set $n:=2$ and $x_1=x,x_2=y$ for our purpose of two dimensions. We chose these functions because we wanted to have both optima at the corners of the grid and optima in the middle of the grid. Furthermore, the search spaces of the different functions differ in the number of local optima they contain.

\subsection{Constraint Handling with Penalization}
The implementation of penalization is more interesting. First we created 4 different functions to calculate penalization values, one for each constraint (the example constraint included). So for each constraint we can compute its penalty value at a patch $(x,y)\in\mathbb{R}^2$ if the constraint is violated at this patch.
Each constraint is rewritten in the form $C(x,y)<0$; if it is violated, the penalty value is $C(x,y)\cdot d$ for a constant $d$. For example, for constraint 1 we have $(x^2+y^2-6000)\cdot d$ as penalty value if $x^2+y^2\ge 6000$. Furthermore, we wanted penalty values to be between 0 and 1, so we chose the constants $d$ appropriately. For example, for constraint 1 we set $d:=\frac{1}{14000}$ as $C(x,y)=x^2+y^2-6000$ can be as large as $14000$ for $x=y=100$.
We then add these penalty values at position $(x,y)$ to the value of the patch at position $(x,y)$ if the selected constraint is violated at position $(x,y)$. This, of course, is only done if penalization is selected as the constraint handling method. With this implementation we do not have to change anything in \textit{update-particle-positions}, as the fitness values at the patches already account for penalization by the selected constraint.

\section{Experiments and Analysis}

\subsection{Experimental Setup \& Evaluation Metrics}
In order to compare the convergence behaviour, we use a fixed number of up to $200$ iterations, without any time limit. Furthermore, we disabled the premature termination criterion once the optimum is found, since this would have led to biased results in our stepwise average approach.
We use the following metrics to capture characteristics of solutions:
\begin{itemize}
    \item \textbf{Fitness}: a value between 0 and 1.
    \item \textbf{Number of Clusters}: to get a feeling for the distribution of particles in the search space. We use the clustering algorithm of the NetLogo plugin \emph{dbscan}, where a cluster consists of at least three agents with a maximum distance of $5$. These values were selected based on some manual tweaking and tuning.
    \begin{figure}[h!]
        \centering
        \includegraphics[width=5cm]{figures/clusters.png}
        \caption{Illustration of clusters in a constrained Schwefel function optimization instance.}
        \label{fig:clusters}
    \end{figure}
    Figure~\ref{fig:clusters} illustrates a few clusters in an early iteration of an optimization instance. With this metric, in combination with others, we aim to identify scenarios where agents split up into separate groups, converging towards different regions in the search space. We normalized the number of clusters by the population size to foster comparability in plots.
    \item \textbf{Average Distance to the Optimal Solution}: should be monotonically decreasing for convex functions. However, in rugged landscapes with several nearly optimal solutions it may not necessarily decrease to $\approx 0$.
    \item \textbf{Average Distance among Particles}: a measure for the distribution of particles in the search space. In the beginning it is relatively high, as particles are randomly scattered across the search space, but it should decrease continuously as particles strive towards the (single or few) global optima. If this value stays high, a disconnected and highly constrained search may be indicated.
    \item \textbf{Average Path Length}: average length of the paths of each particle throughout the entire execution.
\end{itemize}
Finally, for statistically stable results, each configuration in our experiments was executed $15$ times, and the metrics were obtained by averaging over those executions.
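To make these metrics concrete, the following is a minimal sketch (our own, not part of the NetLogo model) of how they could be computed outside NetLogo from exported particle positions, assuming one $(P,2)$ position array per iteration. It uses the same clustering parameters as above (at least three particles, maximum distance $5$); all function and variable names are ours.
\begin{verbatim}
import numpy as np
from scipy.spatial.distance import pdist, cdist
from sklearn.cluster import DBSCAN

def iteration_metrics(positions, optimum, population_size):
    """positions: (P, 2) array of particle coordinates for one iteration."""
    # number of clusters (noise points ignored), normalized by population size
    labels = DBSCAN(eps=5, min_samples=3).fit_predict(positions)
    n_clusters = len(set(labels)) - (1 if -1 in labels else 0)

    # average distance of the particles to the known global optimum
    dist_to_opt = cdist(positions, np.atleast_2d(optimum)).mean()

    # average pairwise distance among particles
    dist_among = pdist(positions).mean()

    return {"clusters": n_clusters / population_size,
            "dist_to_optimum": dist_to_opt,
            "dist_among": dist_among}
\end{verbatim}
The average path length can be accumulated analogously by summing, for each particle, the distances between its positions in consecutive iterations.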
%\subsection{Experiment 1: Strong Personal Focus}
%\begin{figure}
%    \centering
%    \includegraphics[width=0.75\textwidth]{figures/ex1/ex1-1.pdf}
%    \label{fig:ex1-1}
%\end{figure}
%\begin{figure}
%    \centering
%    \includegraphics[width=0.75\textwidth]{figures/ex1/ex1-2.pdf}
%    \label{fig:ex1-2}
%\end{figure}
%
%\begin{figure}
%    \centering
%    \includegraphics[width=0.75\textwidth]{figures/ex1/ex1-3.pdf}
%    \label{fig:ex1-3}
%\end{figure}
%strong personal focus and scarce distribution in the beginning, hard to find neighbour that guides somewhere
%in easier functions even small population sizes
%\begin{figure}
%\begin{subfigure}[c]{0.5\textwidth}
%\centering
%\includegraphics[width=0.75\textwidth]{figures/ex1/f1.png}
%\subcaption{F1}
%\end{subfigure}
%\begin{subfigure}[c]{0.5\textwidth}
%\centering
%\includegraphics[width=0.75\textwidth]{figures/ex1/f3.png}
%\subcaption{F3}
%\end{subfigure}
%
%\caption{Comparison of representative solutions for fitness function $1$ ($P=100$) and $3$ ($P=20$).}
%
%\end{figure}

\FloatBarrier
\subsection{Different Population Sizes}
In the first experiment we compare the impact of the population size on the convergence behaviour. For each function, we compared three different population sizes with the remaining parameters fixed (inertia $\omega=0.33$, swarm-confidence $c_s=1$, personal-confidence $c_p=1$ and particle-speed-limit $V_{max}=10$).

Observations based on Figures~\ref{fig:ex2-1}, \ref{fig:ex2-2} and \ref{fig:ex2-3}:
\begin{itemize}
    \item Larger populations favour finding the global optimum, or at least converge faster. One reason for this is certainly the relatively restricted search space, where it is rather likely that a random initialization hits high-quality regions right in the beginning. We conducted some small experiments with completely unsuitable parameter settings but large populations, which still obtained the global optimum in just a couple of iterations.
    \item For extraordinarily small populations, e.g. $P=10$, the experiments show that convergence towards the optimum can only be achieved for rather simple functions without a rugged landscape (Booth's function seems to be \emph{convexish}, although it is quadratic).
    \item For more complex functions, like Schaffer, those small populations converge to several poor local optima around the global one, indicated by the relatively high number of different clusters and the high average distance to the optimum. Still, after $30$ iterations they seem to concentrate on a certain subspace, where they get stuck in local optima.
    \item Although convergence towards a suboptimal solution w.r.t.\ the fitness sets in rather early, the path length still increases, indicating stagnation behaviour.
\end{itemize}

\begin{figure}[h!]
    \centering
    \includegraphics[width=0.75\textwidth]{figures/ex2/ex2-1.pdf}
    \caption{Population sizes: Metrics for Schaffer Function}
    \label{fig:ex2-1}
\end{figure}
\begin{figure}[h!]
    \centering
    \includegraphics[width=0.75\textwidth]{figures/ex2/ex2-2.pdf}
    \caption{Population sizes: Metrics for Booth Function}
    \label{fig:ex2-2}
\end{figure}
\begin{figure}[h!]
    \centering
    \includegraphics[width=0.75\textwidth]{figures/ex2/ex2-3.pdf}
    \caption{Population sizes: Metrics for Schwefel Function}
    \label{fig:ex2-3}
\end{figure}

\FloatBarrier
\subsection{Acceleration Coefficients}
Based on our findings we proceeded with experiments concerning the acceleration coefficients.
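Before discussing the results, it is helpful to recall how these parameters enter the particle update. The following is a minimal, textbook-style sketch (our own illustration; the NetLogo framework's exact update, in particular how the speed limit is enforced, may differ):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng()

def update_particle(x, v, pbest, gbest, omega, c_p, c_s, v_max):
    """One velocity/position update for a single two-dimensional particle."""
    r_p, r_s = rng.random(2), rng.random(2)   # per-dimension random factors
    v = (omega * v                            # inertia term (particle inertia)
         + c_p * r_p * (pbest - x)            # personal confidence (cognitive term)
         + c_s * r_s * (gbest - x))           # swarm confidence (social term)
    speed = np.linalg.norm(v)
    if speed > v_max:                         # speed limit (assumed: clamp the norm)
        v = v * (v_max / speed)
    return x + v, v
\end{verbatim}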
From manual tests and the previous experiment we know that, for larger populations, our functions can be optimized quite easily with the standard parameters $c_s=1$, $c_p=1$ and $V_{max}=10$. In this experiment we therefore turned our focus to values away from the default (i.e. $1$) and tested all four combinations of the values $0.3$ and $1.7$, for medium and larger population sizes, for each of the selected functions.

Observations based on Figures~\ref{fig:ex4-1-20}, \ref{fig:ex4-1-50}, \ref{fig:ex4-2-20}, \ref{fig:ex4-2-50}, \ref{fig:ex4-3-20} and \ref{fig:ex4-3-50}:
\begin{itemize}
    \item Our main observation from this experiment was that, for the selected functions, a dominance of swarm-confidence is favourable in terms of convergence. Even for smaller populations, convergence towards a high-quality optimum can be obtained in fewer than $20$ iterations.
    \item Furthermore, the number of particles temporarily stuck in suboptimal local optima seems to be lower, indicated by our cluster metric, which in combination with the distance to the global optimum shows a relatively smooth convergence in just $20$ iterations.
    \item For other configurations, on the other hand, these metrics show a different picture. First, there are a lot of fluctuations in the number of clusters, indicating that particles frequently change influencing neighbours, although eventually they still converge towards the optimum on average (but with far more particles remaining in many other regions of the search space). It is important to note that this is not necessarily a poor characteristic. For other functions, particularly those with larger and potentially more rugged search spaces, we claim that such a behaviour is to some degree advantageous since it fosters diversity in the search (still, it clearly shows that suitable parameters highly depend on the instance).
    \item For the inverse dominance relationship between swarm and personal confidence a rather poor behaviour can be observed. First, this configuration lacks convergence towards the optimum w.r.t.\ fitness, but the other metrics show poor behaviour as well. Throughout the entire search, the number of distinct clusters remains relatively high, indicating that particles likely do not profit from each other, but rather seem to be stuck in intensification phases (the path length is still relatively high, indicating that particles keep making significant steps but without any real progress in terms of fitness, which is also shown by the high average distance to the optimum).
    \item Figure~\ref{fig:ex4-example} shows two representative examples of this behaviour. Figure~\ref{fig:ex4-example-a} shows almost arbitrarily scattered particles in the search space after $100$ iterations, while in Figure~\ref{fig:ex4-example-b} all particles were directly guided towards the optimum.
    \item Again, it can be observed that Booth's function is among the easier ones, where even poor configurations obtain the optimum relatively fast.
\end{itemize}

\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex4/ex4-1-20.pdf}
    \caption{Acceleration coefficients: Metrics for Schaffer function and $P=20$}
    \label{fig:ex4-1-20}
\end{figure}
\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex4/ex4-1-50.pdf}
    \caption{Acceleration coefficients: Metrics for Schaffer function and $P=50$}
    \label{fig:ex4-1-50}
\end{figure}
\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex4/ex4-2-20.pdf}
    \caption{Acceleration coefficients: Metrics for Booth function and $P=20$}
    \label{fig:ex4-2-20}
\end{figure}
\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex4/ex4-2-50.pdf}
    \caption{Acceleration coefficients: Metrics for Booth function and $P=50$}
    \label{fig:ex4-2-50}
\end{figure}
\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex4/ex4-3-20.pdf}
    \caption{Acceleration coefficients: Metrics for Schwefel function and $P=20$}
    \label{fig:ex4-3-20}
\end{figure}
\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex4/ex4-3-50.pdf}
    \caption{Acceleration coefficients: Metrics for Schwefel function and $P=50$}
    \label{fig:ex4-3-50}
\end{figure}

\begin{figure}[h!]
\begin{subfigure}[c]{0.5\textwidth}
    \centering
    \includegraphics[width=0.75\textwidth]{figures/ex4/ex4-03-17-f3.png}
    \subcaption{$c_s=0.3$, $c_p=1.7$}
    \label{fig:ex4-example-a}
\end{subfigure}
\begin{subfigure}[c]{0.5\textwidth}
    \centering
    \includegraphics[width=0.75\textwidth]{figures/ex4/ex4-17-03-f3.png}
    \subcaption{$c_s=1.7$, $c_p=0.3$}
    \label{fig:ex4-example-b}
\end{subfigure}
\caption{Comparison of representative solutions for the Schwefel function (fitness function $3$) after $100$ iterations under the two dominance configurations.}
\label{fig:ex4-example}
\end{figure}

\FloatBarrier
\subsection{Inertia}
In the following experiment we studied the impact of the particle inertia on all functions, with default values for the other parameters: $P=30$, $c_s=1$, $c_p=1$ and $V_{max}=10$.

Observations:
\begin{itemize}
    \item It is again illustrated that, for a reasonable parameter setting with a sufficiently large population size, it seems to be rather easy to identify (nearly) optimal regions in the search space, although a too low inertia seems to slow down convergence.
    \item For high $\omega$ values larger fluctuations can be observed, simply because $\omega$ affects the magnitude of the velocity. This behaviour is also reflected in the path lengths being significantly larger than for smaller $\omega$ values. In combination with these, the relatively high distance to the optimum further suggests that we encounter some kind of stagnation behaviour, where particles continuously move in the search space without achieving any significant progress. For example, in the Schwefel function $\omega=1$ converges on average to some fitness $<1$, with the particle distance still being relatively high and with lots of frequently changing clusters; this suggests that many particles are (relatively loosely) gathered around some suboptimal region without achieving any progress on the fitness, but cannot escape this region due to the high concentration of particles there. An illustrative example of such a situation is shown in Figure~\ref{fig:ex3-example}.
    \item Medium values for $\omega$ seem to be most suitable and provide a good trade-off between fitness convergence and dampening fluctuations among clusters, i.e. we suspect that not too many particles get stuck in low-quality local optima, while the population can identify the optimum soon and guide a majority of particles towards this region. This is also illustrated by the average distance to the real optimum, which converges smoothly. Throughout all functions it can be observed that $\omega=0.5$ shows a more slowly increasing average path length, even slower than for low $\omega$ values.
    Here, it is again important to stress the following: in more complex optimization problems, we would assume that such a behaviour can be observed on the level of regions rather than for ``particular optima'', as seems to be the case for these functions.
\end{itemize}

\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex3/ex3-1.pdf}
    \caption{Inertia: Metrics for the Schaffer function.}
    \label{fig:ex3-1}
\end{figure}
\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex3/ex3-2.pdf}
    \caption{Inertia: Metrics for the Booth function.}
    \label{fig:ex3-2}
\end{figure}
\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex3/ex3-3.pdf}
    \caption{Inertia: Metrics for the Schwefel function.}
    \label{fig:ex3-3}
\end{figure}

\begin{figure}[h!]
\begin{subfigure}[c]{0.5\textwidth}
    \centering
    \includegraphics[width=0.75\textwidth]{figures/ex3/wrong-optimum-init.png}
    \subcaption{Initial solution}
    \label{fig:ex3-example-a}
\end{subfigure}
\begin{subfigure}[c]{0.5\textwidth}
    \centering
    \includegraphics[width=0.75\textwidth]{figures/ex3/wrong-optimum.png}
    \subcaption{Final solution after 100 iterations}
    \label{fig:ex3-example-b}
\end{subfigure}
\caption{Comparison of a representative execution (\ref{fig:ex3-example-a}: the initial solution and \ref{fig:ex3-example-b}: the final solution after 100 iterations) with $\omega=1$ converging towards a suboptimal optimum.}
\label{fig:ex3-example}
\end{figure}

\FloatBarrier
\subsection{Speed Limit}
The next few experiments illustrate the impact of the particle speed limit on the convergence behaviour of the algorithm. The parameters used were $P=20,\omega=0.33,c_s=1,c_p=1$, and we compared four different particle speed limits. The plots are shown in Figures~\ref{fig:ex5-1}--\ref{fig:ex5-3} and the main observations are:
\begin{itemize}
    \item For smaller speed limits, convergence to the global optimum is slower, but this is to be expected.
    \item Furthermore, the number of clusters is higher for smaller speed limits. This is visible for the Schwefel function, where we still have a lot of clusters after 100 iterations for $V_{\max}=3$.
    \item Interestingly, the path length is not always longest for the highest speed limit.
    \item Overall, one could say that the speed limit does not have that much of an impact on the Schaffer and Booth functions, whereas too low speed limits lead to a lot of scattered particles in the search space for the Schwefel function. Upon closer inspection, we can also see that some unsuitable speed limits lead to premature convergence away from the global optimum. This is also illustrated in Figure~\ref{fig:ex5comp}, where the search space for the Schwefel function is shown after 100 iterations. For $V_{\max}=3$ we can see a lot of scattered particles stuck in different local optima, whereas for $V_{\max}=20$ all particles are stuck in one single local optimum.
\end{itemize}

\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex5/ex5-1.pdf}
    \caption{Speed limit: Metrics for Schaffer function.}
    \label{fig:ex5-1}
\end{figure}
\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex5/ex5-2.pdf}
    \caption{Speed limit: Metrics for Booth function.}
    \label{fig:ex5-2}
\end{figure}
\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex5/ex5-3.pdf}
    \caption{Speed limit: Metrics for Schwefel function.}
    \label{fig:ex5-3}
\end{figure}

\begin{figure}[h!]
\begin{subfigure}[c]{0.5\textwidth}
    \centering
    \includegraphics[width=0.75\textwidth]{figures/ex5/f3-3.png}
    \subcaption{$V_{max}=3$}
\end{subfigure}
\begin{subfigure}[c]{0.5\textwidth}
    \centering
    \includegraphics[width=0.75\textwidth]{figures/ex5/f3-20.png}
    \subcaption{$V_{max}=20$}
\end{subfigure}
\caption{Comparison of representative solutions for the Schwefel function after 100 iterations with $V_{max}=3$ and $V_{max}=20$.}
\label{fig:ex5comp}
\end{figure}

\FloatBarrier
\subsection{Constraint Handling}
Our last experiment compares the two constraint handling methods, namely penalty and rejection. For this we try each of the $3\cdot 3=9$ combinations of fitness function and constraint and compare constraint handling by rejection and by penalty. The parameters for all experiments are $P=50,c_s=1,c_p=1,V_{\max}=10,\omega=0.33$. All 9 plots are shown in Figures~\ref{fig:ex6-1-1}--\ref{fig:ex6-3-3} and we observed the following:
\begin{itemize}
    \item For constraint 1 and the Schaffer function we have almost the same behaviour for rejection and penalty. This is most certainly due to the fact that the global optimum is at coordinate $(0,0)$, and constraint 1 restricts the valid solutions to a region around the middle of the grid.
    \item For all other combinations of fitness functions and constraints, the penalty method always leads to particles having smaller distances to each other. This is because particles can overcome barriers with the penalty method, whereas with the rejection method particles can get ``stuck'' in some regions. The same argument also explains the lower number of clusters when using the penalty method.
    \item It is also worth mentioning that the path length increases for the penalty method, as particles can walk into invalid regions.
    \item Regarding convergence to global optima we have similar behaviour for both constraint handling methods, as we used a population size of 50, which is quite high. But, for example, for the Schaffer function and constraint 3 we have faster convergence to the global optimum with the penalty method, whereas with the rejection method the particles converge slowly to a local optimum which is not globally optimal.
    \item Overall it can be said that the penalty method outperforms the rejection method in nearly every aspect. This can of course be a result of the choice of our fitness functions and constraints, and also of the particular implementation we chose. But we would still prefer penalty over rejection in nearly every scenario.
\end{itemize}

\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex6/ex6-1-Constraint_1.pdf}
    \caption{Constraint handling: Schaffer function, constraint 1}
    \label{fig:ex6-1-1}
\end{figure}
\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex6/ex6-1-Constraint_2.pdf}
    \caption{Constraint handling: Schaffer function, constraint 2}
    \label{fig:ex6-1-2}
\end{figure}
\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex6/ex6-1-Constraint_3.pdf}
    \caption{Constraint handling: Schaffer function, constraint 3}
    \label{fig:ex6-1-3}
\end{figure}
\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex6/ex6-2-Constraint_1.pdf}
    \caption{Constraint handling: Booth function, constraint 1}
    \label{fig:ex6-2-1}
\end{figure}
\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex6/ex6-2-Constraint_2.pdf}
    \caption{Constraint handling: Booth function, constraint 2}
    \label{fig:ex6-2-2}
\end{figure}
\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex6/ex6-2-Constraint_3.pdf}
    \caption{Constraint handling: Booth function, constraint 3}
    \label{fig:ex6-2-3}
\end{figure}
\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex6/ex6-3-Constraint_1.pdf}
    \caption{Constraint handling: Schwefel function, constraint 1}
    \label{fig:ex6-3-1}
\end{figure}
\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex6/ex6-3-Constraint_2.pdf}
    \caption{Constraint handling: Schwefel function, constraint 2}
    \label{fig:ex6-3-2}
\end{figure}
\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{figures/ex6/ex6-3-Constraint_3.pdf}
    \caption{Constraint handling: Schwefel function, constraint 3}
    \label{fig:ex6-3-3}
\end{figure}

\FloatBarrier
\section{Conclusion}
We have seen how different parameters influence the behaviour of a PSO algorithm. In particular, we have considered population size, particle inertia, acceleration coefficients and particle speed limits, and we have evaluated different choices for these parameters on 3 different fitness functions. The main observation was that for default parameter choices the PSO algorithm performs really well on all 3 fitness functions. One would have to use rather extreme parameter choices (small population sizes or suboptimal acceleration coefficients) to drive the PSO algorithm into premature convergence and other undesirable behaviour.

Furthermore, we compared the rejection and penalty methods for constraint handling using 3 different constraints. We have seen that the penalty method performs better in nearly all aspects and situations. This could be due to our choice of fitness functions, constraints and particular implementation, but we still think that some form of penalization is to be preferred over rejection in almost all cases.

\end{document}
\documentclass[11pt,letterpaper]{article}
\usepackage[numbers,square]{natbib}
\renewcommand\cite[1]{(\citet{#1})}
\usepackage[hmargin=0.75in]{geometry}
\usepackage{color}
\usepackage{chemarr}
\usepackage{amssymb}
\usepackage{graphicx}
\usepackage{epstopdf}
\usepackage{caption}
\usepackage{subcaption}
\usepackage{placeins}
\usepackage{gensymb}
\usepackage{array}
%\usepackage{underscore}
\newcolumntype{L}{>{\arraybackslash}m{12cm}}
\usepackage{comment}
\usepackage{enumitem}

\title{Opt-IGFEM-2D: Developer's Guide}
\author{Marcus Hwai Yik Tan}
\date{Created on January 29th, 2016. Last revised on \today}

\begin{document}
\maketitle

\section{Structure of program}
\begin{figure}[!h]
    \centering
    \includegraphics[width=\linewidth]{CodeStructure.pdf}
    \caption{The directories (in bold) and subdirectories of the program.
    \label{fig_code_structure}}
\end{figure}

\section{Checking sensitivity analysis}
The recommended first step in checking the sensitivity analysis is to compare the derivatives of the stiffness matrix and load vector with respect to a design parameter, for the coarsest mesh, with those obtained by finite differences. The derivatives resulting from the sensitivity analysis can be printed in MATLAB (but not output) by uncommenting the \texttt{\#define SHOW\_DK\_DP} directives in \texttt{sensitivity.h}, \texttt{assemble\_pseudo\_adjoint\_forces.cpp} and \texttt{IGFEM\_element\_pseudo\_adjoint\_forces.cpp}. One can then compare these derivatives with those obtained by finite differences. The finite difference derivatives can be obtained with the script \texttt{check\_DK\_DP.m} in \texttt{M\_tests} (to be updated).

The next step is to compare the derivatives of the objective function obtained from the sensitivity analysis with the finite difference derivatives, using \texttt{test\_objective\_derivatives.m} in \texttt{M\_tests}.

\section{Future Development}
The case of a moving channel inlet is currently under development for the Opt-IGFEM-3D project. Once this is done, the Opt-IGFEM-2D project will also be updated to include that case.

\FloatBarrier
\end{document}
\section{Introduction} \label{s:intro}
\documentclass{article}
\usepackage[utf8]{inputenc}
\usepackage[english]{babel}
\usepackage{graphicx}
\usepackage{multicol}
\usepackage{color}
\graphicspath{{/home/}}
\usepackage{comment}

\title{Something Random}
\author{You \\ Your Title \and Someone Else\\ Something Else}

\begin{document}
\maketitle

\section{First Section}
I will say I wrote nothing of any real importance.

\begin{multicols}{3}
[
\section{Second Section}
All human things are subject to decay. And when fate summons, Monarchs must obey.
]
Hello, here is some text without a meaning. This text should show what a printed text will look like at this place. If you read this text, you will get no information. Really? Is there no information? Is there.
\columnbreak
This will be in a new column. Here is some text without a meaning. This text should show what a printed text will look like at this place. If you read this text, you will get no information. Really? Is there no information? Is there...
\end{multicols}

Something else here. The text should not be multicolumned in this part.

\begin{multicols}{3}
[
\section{Third Section}
All human things are subject to decay. And when fate summons, Monarchs must obey.
]
This is also something random, for I am not sure exactly what is going to happen when I make this a thing, but we do not know.
\columnbreak
This will show what is happening in the next column. If you read this text, you will get no information. Really? Is there no information? Is there...
\end{multicols}

\begin{multicols}{3}
[
\section{Fourth Section}
All human things are subject to decay. And when fate summons, Monarchs must obey.
]
You are still reading my nonsense? This text is just filler that is even more useless than lorem ipsum.
\columnbreak
Why are you still reading my random nonsense? Reading it must be more tedious than me generating it. So why are you listening to my random outpourings? Most of it isn't even spellchecked and carries no information.
\end{multicols}

\section{Fifth Section}
Well, ya made it this far, brother; here, have a cat who made poor life choices.

\begin{figure}[!htb]
\includegraphics[width=\linewidth]{Images/mistake.jpg}
\caption{A cat.}
\label{fig:cat}
\end{figure}

Figure \ref{fig:cat} shows a cat in a predicament.

\end{document}
\paragraph{Answer 1.} The method to answer these questions is simply to try small words, constructing them so that they satisfy the constraints.

\begin{enumerate}

  \item The \label{aba} shortest word \(x\) belonging to \(L(r)\) is found by taking \(\epsilon\) in place of \(\lparen a \disjM{} b\rparen\kleeneM\). So \(x = aba\). Let us check whether \(x \in L(s)\) or not. \(L(s)\) is made of the union of four sub-languages (subsets). To make this clear, let us remove the useless parentheses on the right side:
    \begin{equation*}
      s = \lparen ab \rparen\kleeneM{} \, \disjM{} \, \lparen ba \rparen\kleeneM{} \, \disjM{} \, a\kleeneM{} \, \disjM{} \, b\kleeneM.
    \end{equation*}
    Therefore, membership tests on \(L(s)\) have to be split into four: one membership test on \(\lparen ab \rparen\kleeneM\), one on \(\lparen ba \rparen\kleeneM\), one on \(a\kleeneM\) and another one on \(b\kleeneM\). In other words, \(x \in L(s)\) is equivalent to
    \begin{equation*}
      x \in L(\lparen ab \rparen\kleeneM) \; \text{or} \; x \in L(\lparen ba \rparen\kleeneM) \; \text{or} \; x \in L(a\kleeneM) \; \text{or} \; x \in L(b\kleeneM).
    \end{equation*}
    Let us test the membership with \(x = aba\):
    \begin{enumerate}
      \item The words in \(L(\lparen ab \rparen\kleeneM)\) are \(\epsilon\), \(ab\), \(abab\ldots\) Thus \(aba \not\in L(\lparen ab \rparen\kleeneM)\).
      \item The words in \(L(\lparen ba \rparen\kleeneM)\) are \(\epsilon\), \(ba\), \(baba\ldots\) Hence \(aba \not\in L(\lparen ba \rparen\kleeneM)\).
      \item The words in \(L(a\kleeneM)\) are \(\epsilon\), \(a\), \(aa\ldots\) Therefore \(aba \not\in L(a\kleeneM)\).
      \item The words in \(L(b\kleeneM)\) are \(\epsilon\), \(b\), \(bb\ldots\) So \(aba \not\in L(b\kleeneM)\).
    \end{enumerate}
    The conclusion is \(aba \not\in L(s)\).

  \item What is the shortest word belonging to \(L(s)\)? Since the four sub-languages composing \(L(s)\) are starred, \(\epsilon \in L(s)\), so the shortest word of \(L(s)\) is \(\epsilon\). Since we showed in item~(\ref{aba}) that \(aba\) is the shortest word of \(L(r)\), it follows that \(\epsilon \not\in L(r)\), because \(\epsilon\) has length \(0\).

  \item This question is a bit more difficult. After a few tries, we cannot find any~\(x\) such that \(x \in L(r)\) and \(x \in L(s)\). Then we may try to prove that \(L(r) \cap L(s) = \varnothing\), \emph{i.e.,} that there is no such~\(x\). How should we proceed? The idea is to use the decomposition of \(L(s)\) into four sub-languages and try to prove
    \begin{align*}
      L(r) \cap L(\lparen ab \rparen\kleeneM) &= \varnothing,\\
      L(r) \cap L(\lparen ba \rparen\kleeneM) &= \varnothing,\\
      L(r) \cap L(a\kleeneM) &= \varnothing,\\
      L(r) \cap L(b\kleeneM) &= \varnothing.
    \end{align*}
    If all these four equations are true, they imply \(L(r) \cap L(s) = \varnothing\).
    \begin{enumerate}
      \item Any word in \(L(r)\) ends with \(a\), whereas any word in \(L(\lparen ab \rparen\kleeneM)\) ends with \(b\) or is \(\epsilon\). Thus \(L(r) \cap L(\lparen ab \rparen\kleeneM) = \varnothing\).
      \item For the same reason, \(L(r) \cap L(b\kleeneM) = \varnothing\).
      \item Any word in \(L(r)\) contains both \(a\) and \(b\), whereas any word in \(L(a\kleeneM)\) contains only the letter \(a\) or is \(\epsilon\). Therefore \(L(r) \cap L(a\kleeneM) = \varnothing\).
      \item Any word in \(L(r)\) starts with \(a\), whereas any word in \(L(\lparen ba \rparen\kleeneM)\) starts with \(b\) or is \(\epsilon\). Thus \(L(r) \cap L(\lparen ba \rparen\kleeneM) = \varnothing\).
    \end{enumerate}
    Finally, since all four equations hold, they imply that
    \begin{equation*}
      L(r) \cap L(s) = \varnothing.
    \end{equation*}

  \item Let us construct, letter by letter, a word \(x\) which belongs neither to \(L(r)\) nor to \(L(s)\). First, we note that all words in \(L(r)\) start with \(a\), so we can start \(x\) with \(b\): this way \(x \not\in L(r)\). So we have \(x = b\ldots\) and we have to fill the dots with some letters in such a way that \(x \not\in L(s)\). We use again the decomposition of~\(L(s)\) into four sub-languages and make sure that~\(x\) does not belong to any of those sub-languages.

    First, because \(x\) starts with \(b\), we have \(x \not\in L(a\kleeneM)\) and \(x \not\in L(\lparen ab \rparen\kleeneM)\). Now, we have to add some more letters such that \(x \not\in L(b\kleeneM)\) and \(x \not\in L(\lparen ba \rparen\kleeneM)\). Since any word in \(L(b\kleeneM)\) of length at least two has \(b\) as its second letter, we can choose the second letter of \(x\) to be \(a\). This way \(x=ba\ldots \not\in L(b\kleeneM)\). Finally, we have to add more letters to make sure that
    \begin{equation*}
      x=ba\ldots \not\in L(\lparen ba\rparen\kleeneM).
    \end{equation*}
    Any word in \(L(\lparen ba\rparen\kleeneM)\) is either \(\epsilon\) or \(ba\) or \(baba\ldots\), hence its third letter, when it exists, is \(b\). Therefore, let us choose the letter \(a\) as the third letter of \(x\); we thus have \(x=baa \not\in L(\lparen ba\rparen\kleeneM)\).

    In summary, \(baa \not\in L(r), baa \not\in L(b\kleeneM), baa \not\in L(\lparen ba\rparen\kleeneM), baa \not\in L(a\kleeneM), baa \not\in L(\lparen ab\rparen\kleeneM)\), which is equivalent to \(baa \not\in L(r)\) and \(baa \not\in L(\lparen ab \rparen\kleeneM) \cup L(\lparen ba \rparen\kleeneM) \cup L(a\kleeneM) \cup L(b\kleeneM) = L(s)\). Therefore, \(x=baa\) is one possible answer.

\end{enumerate}
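These membership claims can also be checked mechanically. Below is a small illustrative sketch (ours, not part of the exercise) using Python's \texttt{re} module; it assumes, consistently with the reasoning above, that \(r = a\lparen a \disjM{} b\rparen\kleeneM ba\) and that \(s\) is the union of the four starred sub-languages listed earlier.
\begin{verbatim}
import re

r = re.compile(r"a(?:a|b)*ba")            # r = a(a|b)*ba, as used above
s = re.compile(r"(?:ab)*|(?:ba)*|a*|b*")  # s = (ab)* | (ba)* | a* | b*

for word in ["aba", "", "baa"]:
    in_r = r.fullmatch(word) is not None
    in_s = s.fullmatch(word) is not None
    print(repr(word), "in L(r):", in_r, "- in L(s):", in_s)

# Expected output, matching the answers above:
# 'aba' in L(r): True - in L(s): False
# '' in L(r): False - in L(s): True
# 'baa' in L(r): False - in L(s): False
\end{verbatim}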
There are several metrics for evaluating different quality factors of written tests. One key property of tests is that they must be able to detect defects in the code, since that is typically the main reason for writing tests at all. We have chosen to call this property \emph{test efficiency}. Another quality factor is the performance of the tests, i.e. the execution time of the test suite. This section explains the properties of these quality factors, as well as their purpose.\\

Readability and ease of writing of tests are other important quality factors, which are not covered in this section since they are hard to measure in an objective way. They also often depend on the testing framework rather than on the tests themselves. Instead, we evaluate these properties for each chosen testing framework in \fref{sec:choices}.\\

\subsection{Test coverage}
\label{sec:coverage}
\input{theory/quality/coverage.tex}

\subsection{Mutation testing}
\label{sec:theory_mutation}
\input{theory/quality/mutation.tex}

\subsection{Execution time}
\input{theory/quality/time.tex}
\subsection{Metronome}
This plugin can be used as a metronome to keep time during music practice. It supports two modes of operation, depending on whether it is started from the plugin menu or as a viewer for tempomap (\verb:.tempo:) files. The sound is a piercing square wave that can be heard well even through loud music from a band. In addition, the display indicates the beats while playing, so that you can discreetly place the device within sight to check the tempo instead of wearing headphones at a concert.

\subsubsection{Simple Interactive Mode}
This is the mode of operation that is active when the plugin is started directly from the menu. It offers a uniform metronome sound at a constant tempo. You can adjust the tempo through the interface or by tapping it out on the appropriate button.

\begin{btnmap}
    \PluginExit
        \opt{HAVEREMOTEKEYMAP}{& }
        & Exit plugin \\
    \PluginCancel
        \opt{HAVEREMOTEKEYMAP}{& \PluginRCCancel}
        & Stop \\
    \PluginSelectRepeat
        \opt{HAVEREMOTEKEYMAP}{& \PluginRCCancel}
        & Start \\
    \PluginSelect
        \opt{HAVEREMOTEKEYMAP}{& \PluginRCSelect}
        & Tap tempo \\
    \PluginLeft{} / \PluginRight
        \opt{HAVEREMOTEKEYMAP}{& \PluginRCLeft{} / \PluginRCRight}
        & Adjust tempo \\
    \opt{scrollwheel}{\PluginScrollFwd{} / \PluginScrollBack}
    \nopt{scrollwheel}{\PluginUp{} / \PluginDown}
        \opt{HAVEREMOTEKEYMAP}{& \PluginRCUp{} / \PluginRCDown}
        & Adjust volume \\
    \opt{IRIVER_H100_PAD,IRIVER_H300_PAD,SANSA_E200_PAD,SAMSUNG_YH820_PAD}{
        \ButtonRec
        \opt{HAVEREMOTEKEYMAP}{& }
        & Sync tap \\}
\end{btnmap}

\subsubsection{Programmed Track Mode}
When the plugin is started as a viewer for tempomap files (ending in \verb:.tempo:), it starts in the track mode, which offers playback of a preprogrammed metronome track consisting of multiple parts, each with possibly different properties. In contrast to the simple mode, there is a notion of meter and bars, along with emphasis on certain beats.

Parts can have these properties:
\begin{itemize}
    \item finite or infinite duration in bars (navigation only jumps to the beginning of infinite parts),
    \item differing meters (4/4, 3/4, 6/8, etc., default 4/4),
    \item differing tempo (always in quarter beats per minute, default 120) with
    \begin{itemize}
        \item one tempo per bar or even one tempo per beat, or
        \item smooth tempo changes with configurable acceleration, and
    \end{itemize}
    \item custom beat patterns (tick/tock/silence on each beat), the default being emphasis (tick) on the first beat and normal sound (tock) on the others.
\end{itemize}

\paragraph{The button mapping} is different to enable navigation in the programmed track.
\begin{btnmap}
    \PluginExit
        \opt{HAVEREMOTEKEYMAP}{& }
        & Exit plugin \\
    \PluginCancel
        \opt{HAVEREMOTEKEYMAP}{& \PluginRCCancel}
        & Stop (stay at position) \\
    \PluginSelect
        \opt{HAVEREMOTEKEYMAP}{& \PluginRCSelect}
        & Start from / Stop at current position \\
    \PluginLeft{} / \PluginRight
        \opt{HAVEREMOTEKEYMAP}{& \PluginRCLeft{} / \PluginRCRight}
        & Seek in track \\
    \opt{scrollwheel}{\PluginScrollFwd{} / \PluginScrollBack}
    \nopt{scrollwheel}{\PluginUp{} / \PluginDown}
        \opt{HAVEREMOTEKEYMAP}{& \PluginRCUp{} / \PluginRCDown}
        & Adjust volume \\
    \opt{IRIVER_H100_PAD,IRIVER_H300_PAD,SANSA_E200_PAD,SAMSUNG_YH820_PAD}{
        \ButtonRec
        \opt{HAVEREMOTEKEYMAP}{& }
        & Sync tap \\}
\end{btnmap}

\paragraph{Navigation}
The display indicates the part properties and the position in the track as follows:
\begin{verbatim}
Metronome Track
---------------
"Interlude"
3/4@120 V-25
P2/13: B1/5+2
\end{verbatim}
In this example, the part label is ``Interlude'', the meter is 3/4 and the tempo 120 quarter beats per minute (bpm). The volume setting is at -25 and this is the second part of a track with 13 parts in total. In that part, the position is at the second beat of the first bar of five.

\paragraph{The syntax of programmed tracks} in tempomap files follows the format defined by \url{http://das.nasophon.de/klick/}. Actually, the goal is to keep compatibility between klick and this Rockbox metronome. The parts of a track are specified one line each in this scheme (pieces in [] are optional):
\begin{verbatim}
[name:] bars [meter ]tempo[-tempo2[*accel|/accel]] [pattern] [volume]
\end{verbatim}
The bar count and tempo always have to be specified; the rest is optional. One example is
\begin{verbatim}
part I: 12 3/4 133
\end{verbatim}
for a part named ``part I'', 12 bars long, in 3/4 meter, with a tempo of 133 quarter beats per minute.

Tempo changes are indicated by specifying a tempo range and the acceleration in one of these ways:
\begin{verbatim}
0 4/4 90-150*0.25
0 4/4 150-90/4
16 4/4 100-200
\end{verbatim}
The first one goes from 90 to 150 bpm in an endless part with a 0.25 bpm increase per bar. The second one goes down from 150 to 90 with 4 bars per bpm change, which is the same acceleration as in the first line. The last one is a part of 16 bars length that changes tempo from 100 to 200 smoothly during its whole lifetime (6.25 bpm/bar). For details on how the acceleration works, see \url{http://thomas.orgis.org/science/metronome-tempomath/tempomath.html}.

It is also possible to provide a tempo for each individual beat in a part by separating the values with commas (no spaces),
\begin{verbatim}
varibeat: 3 4/4 135,90,78,100,120,120,99,100,43,94,120,133
\end{verbatim}
where the beat duration first follows 135 bpm, then 90 bpm, and so forth. You are required to provide a value for each beat in all bars of the part.

You can provide a pattern that controls how the beats are played:
\begin{center}
\begin{tabular}{c|l}
Symbol & Meaning \\
\hline
X & emphasized beat (Tick) \\
x & normal beat (Tock) \\
. & silent beat
\end{tabular}
\end{center}
Some examples:
\begin{verbatim}
default: 0 4/4 120 Xxxx
rockon2: 0 4/4 120 xXxX
solea:   0 12/4 180 xxXxxXxXxXxX
shuffle: 0 12/12 120 x.xX.xx.xX..
funky:   0 16/16 120 x.x.X..X.Xx.X..X
\end{verbatim}
The 12/12 for the shuffle creates 1/4 triplets. Just do a bit of math;-) This is still a metronome, not a drum machine, but it can act like a basic one, helping you to figure out a certain rhythm within the meter.
The UI is developed so that it fits the display of a Sansa Clip+, which is the hardware device the plugin is tested on. It seems to work reasonably well on some other models in the simulator.

Finally, a more complete tempomap file:
\begin{verbatim}
# An example track exercising the programmable Rockbox metronome
# or also http://das.nasophon.de/klick/.
lead-in: 1 4/4 120 XXXX 0.5 # 4 emphasized but less loud ticks
intro: 4 4/4 120            # standard beat
tearing down: 4 120-90      # changing tempo from 120 to 90
break: 2 1/4 90             # 2 1/4 bars at 90
rolling: 2 6/8 90           # 2 6/8 at same tempo (quarters!)
rumbling: 4 3/4 90 X.x      # 3/4, first (tick) and last (tock)
ramp-up: 8 2/4 90-150       # speeding up to 150 bpm again
flow: 4 150                 # steady 4/4 at 150 bpm
death: 8 150-60             # going down to 60
final: 1 1/1 60             # one last hit
\end{verbatim}
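For readers who want to check the tempo-change arithmetic above, the following small sketch (our illustration only; it is not part of klick or of the Rockbox plugin) computes the tempo at the start of each bar, assuming the acceleration is expressed in bpm per bar as in the examples:
\begin{verbatim}
def bar_tempi(start, end, accel_per_bar, bars):
    """Tempo at the start of each of `bars` bars, ramping from start towards end."""
    sign = 1 if end >= start else -1
    tempi, t = [], float(start)
    for _ in range(bars):
        tempi.append(round(t, 2))
        t += sign * accel_per_bar
        t = min(t, end) if sign > 0 else max(t, end)   # clamp at the target tempo
    return tempi

# "0 4/4 90-150*0.25": 0.25 bpm increase per bar (first five bars shown)
print(bar_tempi(90, 150, 0.25, bars=5))        # [90.0, 90.25, 90.5, 90.75, 91.0]
# "0 4/4 150-90/4": 4 bars per bpm, i.e. 0.25 bpm decrease per bar
print(bar_tempi(150, 90, 1 / 4, bars=5))       # [150.0, 149.75, 149.5, 149.25, 149.0]
# "16 4/4 100-200": 100 bpm spread over 16 bars, i.e. 6.25 bpm per bar
print(bar_tempi(100, 200, 100 / 16, bars=4))   # [100.0, 106.25, 112.5, 118.75]
\end{verbatim}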
\subsection{Indifference curves}

We have a utility function \(U=f(x,y)\).

An indifference curve is a curve along which the consumer is indifferent between all bundles on it:

\(f(x,y)=c\)

\subsection{Marginal rate of substitution}

The marginal rate of substitution is the amount of one good that a consumer is willing to give up for another. It is given by the (absolute) slope of the indifference curve, which equals the ratio of marginal utilities:

\(MRS(x_1, x_2)=\dfrac{MU(x_1)}{MU(x_2)}\)

For a Cobb--Douglas utility function \(U=\prod_i x_i^{\alpha_i}\), the marginal utilities are

\(MU(x_1) = \dfrac{1}{x_1}\alpha_1\prod_i x_i^{\alpha_i}\)

\(MU(x_2) = \dfrac{1}{x_2}\alpha_2\prod_i x_i^{\alpha_i}\)

so

\(MRS(x_1, x_2)=\dfrac{\dfrac{1}{x_1}\alpha_1\prod_i x_i^{\alpha_i}}{\dfrac{1}{x_2}\alpha_2\prod_i x_i^{\alpha_i}}\)

\(MRS(x_1, x_2)=\dfrac{\dfrac{1}{x_1}\alpha_1}{\dfrac{1}{x_2}\alpha_2}\)

\(MRS(x_1, x_2)=\dfrac{\dfrac{\alpha_1}{x_1}}{\dfrac{\alpha_2}{x_2}}=\dfrac{\alpha_1 x_2}{\alpha_2 x_1}\)
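As a worked example (our own, for illustration), take a Cobb--Douglas utility function with \(\alpha_1=\tfrac{1}{3}\) and \(\alpha_2=\tfrac{2}{3}\):

\(MRS(x_1, x_2)=\dfrac{\alpha_1 x_2}{\alpha_2 x_1}=\dfrac{x_2}{2x_1}\)

At the bundle \((x_1,x_2)=(1,4)\) this gives \(MRS=2\): the consumer is willing to give up two units of good 2 in exchange for one additional unit of good 1.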
\chapter*{ABSTRACT}

\textit{The concept of the Internet of Things has been widely applied in everyday life in various fields, such as agriculture, health, industry, security, and transportation. One problem still commonly encountered today is that parking systems use conventional, manual methods to record the license plate number of each vehicle to be parked. The purpose of this research is to create a system that utilizes Internet of Things technology in the transportation sector. The system in question is an automatic parking system that uses RFID reading and vehicle plate image recognition. The microcomputer used is a Raspberry Pi, together with an MFRC522 RFID reader, an HC-SR04 ultrasonic sensor, an SG90 servo, and a Raspberry Pi V2 camera. This research also uses a web application as the user interface, built with the Python programming language and the Flask framework.}

\textit{The results obtained are a hardware design and a web application that are integrated with each other. All sensors used are connected to the Raspberry Pi. The data obtained from the sensors are stored in the database and displayed on the web user interface. The displayed data are the ID read from the RFID sensor and the vehicle plate number captured by the camera. The ultrasonic sensor measures the distance to the vehicle, and the servo acts as a parking barrier. The web application provides several features, such as viewing occupied and available slots, registering new users, adding balances, and viewing information about parking slots.}

\begin{table}[h]
\begin{tabular}{ p{0.17\textwidth} p{0.8\textwidth} }
\\
\textbf{Keywords :} & \textit{Internet of Things}, \textit{Smart Parking System}, \textit{Raspberry Pi}, \textit{Web}, \textit{Database}
\end{tabular}
\end{table}
{ "alphanum_fraction": 0.7882611081, "avg_line_length": 151.9166666667, "ext": "tex", "hexsha": "c84bbf1e856817de13d5ed8b8f0ea34689df89af", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "0f410a402dfb920d7e93a9e360acbeb716e4a64b", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "FikriSatria11/project-skripsi2", "max_forks_repo_path": "include/halaman-abstract.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "0f410a402dfb920d7e93a9e360acbeb716e4a64b", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "FikriSatria11/project-skripsi2", "max_issues_repo_path": "include/halaman-abstract.tex", "max_line_length": 882, "max_stars_count": null, "max_stars_repo_head_hexsha": "0f410a402dfb920d7e93a9e360acbeb716e4a64b", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "FikriSatria11/project-skripsi2", "max_stars_repo_path": "include/halaman-abstract.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 397, "size": 1823 }
% Chapter Template
\chapter{Topical n-grams model} % Main chapter title

\label{topicalngram} % Change X to a consecutive number; for referencing this chapter elsewhere, use \ref{ChapterX}

\lhead{Chapter 5. \emph{Topical n-grams model}} % Change X to a consecutive number; this is for the header on each page - perhaps a shortened title

%----------------------------------------------------------------------------------------
% Introduction
%----------------------------------------------------------------------------------------

Joint sentiment and topic models have been used to tackle the sentiment classification problem. Despite having a hierarchical structure, these generative models rely on a bag of words assumption. Due to this fact, they tend to misclassify texts that carry sentiment in the form of phrases. \textit{LDA} and its extensions do not work properly with phrases. To tackle this situation, we propose an unsupervised approach to sentiment analysis using the topical n-grams model, which has been shown to be effective with phrases. We train the topical n-grams model using two topics (positive and negative), a list of positive and negative words, and rules to detect positive and negative phrases. New documents are then classified using this trained model. The system gives better results than the existing Joint Sentiment Topic model. We also propose an approach to generate a list of positive and negative words using LDA, based on our observations reported in~\cref{experiments}.

\section{Introduction}

In the next section, we will discuss the topical n-grams model in detail.

\section{Topical n-grams model}

n-gram phrases (or collocations) are fundamentally important in many areas of natural language processing (e.g., parsing, machine translation and information retrieval). A phrase as a whole carries more information than the sum of its individual components; it is thus much more important in determining the topics of document collections than individual words \citep*{wang2005note}. However, most topic models assume that words are generated independently of each other, i.e., under the bag of words assumption. The extra complexity introduced by modeling phrases leads these topic models to ignore phrases completely. It is true that these models with the bag of words assumption have enjoyed great success and attracted a lot of interest from researchers with different backgrounds. Still, a topic model that considers phrases would be more useful in certain applications. The topical n-grams model is one such generative model. Its generative process is as follows:

\begin{enumerate}
\item Draw multinomial \(\phi_z\) from a Dirichlet prior \(\beta\);
\item Draw binomial \(\psi_{zw}\) from a Beta prior \(\gamma\);
\item Draw multinomial \(\sigma_{zw}\) from a Dirichlet prior \(\delta\);
\item For each document \(d\), draw a multinomial \(\theta^{(d)}\) from a Dirichlet prior \(\alpha\); then for each word \({w_i}^{(d)}\) in document \(d\),
	\begin{enumerate}
	\item Draw \({x_i}^{(d)}\) from binomial \(\psi_{{z_{i-1}}^{(d)}{w_{i-1}}^{(d)}}\);
	\item Draw \({z_i}^{(d)}\) from multinomial \(\theta^{(d)}\);
	\item Draw \({w_i}^{(d)}\) from multinomial \(\sigma_{{z_i}^{(d)}{w_{i-1}}^{(d)}}\) if \({x_i}^{(d)} = 1\); else draw \({w_i}^{(d)}\) from multinomial \(\phi_{{z_i}^{(d)}}\).
	\end{enumerate}
\end{enumerate}

The main point to infer from this generative process is that the topic assignments for the two terms in a bigram are not required to be identical.
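As a concrete illustration (an added example, not taken from the original text): suppose the previous word is \(w_{i-1} = \textit{not}\) and the bigram status \({x_i}^{(d)}\) is drawn as 1. The word \({w_i}^{(d)}\) is then drawn from the bigram distribution \(\sigma_{{z_i}^{(d)}{w_{i-1}}^{(d)}}\) rather than from \(\phi_{{z_i}^{(d)}}\), so a phrase such as \textit{not good} can be generated as a unit under the negative topic, even though the unigram \textit{good} on its own would favour the positive topic. This phrase-level behaviour is exactly what the bag-of-words models discussed above cannot capture.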
In the description of topical n-grams given in \citep*{wang2005note}, the topic of the last term is used as the topic of the phrase. But in our experiments, we have used certain rules as prior information to assign topics to a phrase initially.

\includegraphics[width=\textwidth]{topicalngram.png}
\begin{center}
Figure 5.1 Topical n-grams model
\end{center}

\subsection{Inferencing}

Gibbs sampling is used to conduct approximate inference in this work. During Gibbs sampling, we draw the topic assignment \(z_i\) and the bigram status \(x_i\) iteratively for each word \(w_i\) according to the following conditional probability distribution:

\begin{equation}
p(z_i,x_i | z_{-i},x_{-i},w,\alpha,\beta,\gamma,\delta) \propto
\frac{\gamma_{x_i} + p_{z_{i-1}w_{i-1}x_i}}{\sum_{k=0}^1 {(\gamma_k+p_{z_{i-1}w_{i-1}k})}}
(\alpha_{z_i} + q_{d z_i}) \times
\left\{
\begin{array}{l l}
\frac{\beta_{w_i} + n_{z_i w_i}}{\sum_{v=1}^V {(\beta_v+n_{z_i v})}} & \quad \text{if $x_i = 0$}\\
\frac{\delta_{w_i} + m_{z_i w_{i-1} w_i}}{\sum_{v=1}^V {(\delta_v + m_{z_i w_{i-1} v})} } & \quad \text{if $x_i = 1$}
\end{array}
\right.
\end{equation}

where \\
\(z_{-i}\) denotes the topic assignments for all word tokens except word \(w_i\), \\
\(x_{-i}\) represents the bigram status for all tokens except word \(w_i\), \\
\(n_{zw}\) represents how many times word \(w\) is assigned to topic \(z\) as a unigram, \\
\(m_{zwv}\) represents how many times word \(v\) is assigned as the second term of a bigram given the previous word \(w\), \\
\(p_{zwk}\) denotes how many times the status variable \(x\) equals \(k\) given the previous word \(w\) and the previous word's topic \(z\), and \\
\(q_{dz}\) represents how many times a word is assigned to topic \(z\) in document \(d\).

In the next section, we will explain the use of the topical n-grams model for sentiment analysis.

\section{Topical n-grams model for Sentiment Analysis}

To make use of the topical n-grams model for sentiment classification, we use an approach similar to using LDA for sentiment classification.

\begin{itemize}
\itemsep0em
\item Set the number of topics equal to 2.
\item Remove stop-words.
\item Remove objective words, as they do not affect sentiment. The objective words in this case do not include negation words like \textit{doesn't, won't, no}, etc. This is to ensure that we can catch negation of polarity when negation words are used with subjective words.
\item Apply Gibbs sampling with a prior. The prior used in this case is more sophisticated and can handle both words and phrases. For a word that is not part of a bigram, simply use a list of positive and negative words to assign the positive or negative topic. If the word is part of a bigram, then assign it the topic of the bigram. There are some rules to detect and assign topics to bigrams, which are explained next.
\item Use the trained model to classify a new document as positive or negative.
\end{itemize}

\subsubsection*{Rules for topic assignment of phrases}

At present, our rules are restricted to bigrams. We plan to extend them as explained in~\cref{conclusions}. In the following rules, we mean topic when we say polarity. The use of polarity makes the rules easier to understand, as they are concerned with subjectivity.

\begin{enumerate}
\itemsep0em
\item If the first word in the bigram is a negation word and the second word is subjective, then the polarity of the bigram is opposite to the polarity of the second word. \\
\textbf{Examples:} \textit{won't like, won't regret}, etc.
Here, \textit{won't like} is assigned negative polarity and \textit{won't regret} is assigned positive polarity.
\item If both words in the bigram are subjective, then there are two cases. If both words have the same polarity, then the resulting polarity is the same. But if their polarities are different, then the polarity of the first word is assigned to the bigram. \\
\textbf{Examples:} \textit{beautifully amazing} is positive, as both words are positive. \textit{lack respect} is assigned negative polarity as per the rules.
\end{enumerate}

One salient feature of this approach is that it is an unsupervised method and can work on any domain.

\section*{SUMMARY}

In this chapter, we discussed the motivation behind the topical n-grams model. We then discussed the inference equation it uses. Finally, we proposed an approach to use this model for sentiment analysis. The experiments and results obtained are discussed in~\cref{experiments}. In the next chapter, we will show the use of deep semantics for sentiment analysis.
{ "alphanum_fraction": 0.7322640345, "avg_line_length": 64.3253968254, "ext": "tex", "hexsha": "700fa32ce858aa3d6059519f08220874d5d94103", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "3b1b8b0b3b33d3728f000a4260aa2e264df39079", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "nikolodien/Paper-submissions", "max_forks_repo_path": "Report/tex_files/Chapters/topicalngram.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "3b1b8b0b3b33d3728f000a4260aa2e264df39079", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "nikolodien/Paper-submissions", "max_issues_repo_path": "Report/tex_files/Chapters/topicalngram.tex", "max_line_length": 183, "max_stars_count": null, "max_stars_repo_head_hexsha": "3b1b8b0b3b33d3728f000a4260aa2e264df39079", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "nikolodien/Paper-submissions", "max_stars_repo_path": "Report/tex_files/Chapters/topicalngram.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2057, "size": 8105 }
\chapter{Prototyping the Semantics of a Domain-Specific Modeling Language} \label{chap:prototype-semantics} \input{prototype-semantics/abstract} \input{prototype-semantics/introduction} %\input{prototype-semantics/dsl} \input{prototype-semantics/prototyping-semantics} \input{prototype-semantics/visualization} \input{prototype-semantics/verification} \input{prototype-semantics/related-work} \input{prototype-semantics/conclusions-and-future-work}
{ "alphanum_fraction": 0.8249452954, "avg_line_length": 25.3888888889, "ext": "tex", "hexsha": "37900fca57187267d072f7324464db101cbfd125", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "8cabcf160a6f06e12b5ced92bb5cec06983e5bb7", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ljpengelen/latex-phd-thesis", "max_forks_repo_path": "prototype-semantics/prototype.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "8cabcf160a6f06e12b5ced92bb5cec06983e5bb7", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ljpengelen/latex-phd-thesis", "max_issues_repo_path": "prototype-semantics/prototype.tex", "max_line_length": 74, "max_stars_count": 1, "max_stars_repo_head_hexsha": "8cabcf160a6f06e12b5ced92bb5cec06983e5bb7", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ljpengelen/latex-phd-thesis", "max_stars_repo_path": "prototype-semantics/prototype.tex", "max_stars_repo_stars_event_max_datetime": "2019-12-18T21:53:57.000Z", "max_stars_repo_stars_event_min_datetime": "2019-12-18T21:53:57.000Z", "num_tokens": 106, "size": 457 }
\section{Optimizations}

\subsection{File Loading}
When loading a data resource, the greatest bottleneck lies in fetching it from a persistent storage medium. Performing multiple IO operations on a hard disk, for example, can heavily increase the load time of a file because they get interleaved with IO operations from the rest of the operating system\footnote{Programs do not have exclusive ownership of the hard disk in a multitasking environment}. With that in mind, resources on a hard drive or other persistent storage are best loaded with as few IO operations as possible. In this case, files are read in a single operation.

\subsection{Rendering State Changes Minimization}
Altering the driver state (through OpenGL calls, for example) can steal precious time from the program, because a driver call may or may not change state on the graphics card hardware, depending on the action taken.\footnote{Modern drivers often try to defer as much state change as possible until the next draw call.} Therefore, redundant graphics API calls are avoided and similar ones are batched as much as possible.

\subsection{Deferred Rendering}
With the traditional Forward Rendering pipeline, each vertex passes through the GPU processing pipeline regardless of whether it ends up visible. That means that many fragments reach the final shading stage without ever being visible. In addition, this approach does not scale well with more lights, because the more lights there are, the more unnecessary shading operations are required. This rendering overhead can be avoided by shading only the finally visible fragments. This is achieved through a ``Deferred Rendering'' pipeline that is composed of two main passes:
\begin{enumerate}
	\item Geometry Pass
	\item Lighting Pass
\end{enumerate}

\subsubsection{Geometry Pass}
\begin{figure}[ht]
	\centering
	\includegraphics[scale=0.3, clip=true]{./image/gbuffer.png}
	\caption{Position, Normal and BaseColor GBuffer textures}
	\label{fig:gbuf}
\end{figure}
In the Geometry Pass each object is rendered once. Base color, metalness, roughness, reflectivity, and world space normals are rendered into framebuffer targets (textures) for each final fragment. This logical grouping of textures is called the GBuffer (Geometry Buffer). The GBuffer is set as a Multiple Render Target output in the Geometry Pass, so at the end of the fragment shading stage all the data mentioned above are saved to their respective textures in the GBuffer, which in turn is bound and used when shading the given fragment in the Lighting Pass. The visualized contents of some GBuffer textures can be seen in figure~\ref{fig:gbuf}.

\subsubsection{Lighting Pass}
The lighting pass consists of multiple individual light passes; one per direct light plus one for the environmental lighting. For each sub-pass the lighting of each screen space fragment is computed, using data from the GBuffer and bound ShadowMaps. Starting from an empty (black) accumulation texture buffer, the light contribution of every pass is added. For directional lights and environmental lighting a full screen quad render is performed, while for point lights only the fragments of the area they affect are rendered.
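In outline, the per-frame structure of these two passes looks roughly like the sketch below. This is an illustrative sketch only, not the engine's actual code: the helper functions (\texttt{UseProgram}, \texttt{DrawObject}, \texttt{BindGBufferTextures}, \texttt{DrawFullScreenQuad}, \texttt{DrawLightVolume}) and the object/light containers are hypothetical placeholders, while the \texttt{gl*} calls are standard OpenGL.

\begin{verbatim}
/* Illustrative per-frame structure of the deferred pipeline described above. */
void RenderFrame(void)
{
    /* Geometry pass: render every object once, filling the GBuffer (MRT). */
    glBindFramebuffer(GL_FRAMEBUFFER, gBufferFbo);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    UseProgram(geometryShader);      /* writes base color, normals, etc.   */
    for (int i = 0; i < numObjects; ++i)
        DrawObject(&objects[i]);

    /* Lighting pass: accumulate each light's contribution additively. */
    glBindFramebuffer(GL_FRAMEBUFFER, accumulationFbo);
    glClear(GL_COLOR_BUFFER_BIT);    /* start from an empty (black) buffer */
    BindGBufferTextures();           /* GBuffer becomes shader input       */
    glEnable(GL_BLEND);
    glBlendFunc(GL_ONE, GL_ONE);     /* additive accumulation              */
    for (int i = 0; i < numLights; ++i) {
        if (lights[i].isPointLight)
            DrawLightVolume(&lights[i]);    /* bounding sphere, see below  */
        else
            DrawFullScreenQuad(&lights[i]); /* directional / environmental */
    }
    glDisable(GL_BLEND);
}
\end{verbatim}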
\begin{figure}[h]
	\centering
	\includegraphics[scale=0.18, clip=true]{./image/lightpasses.png}
	\caption{Directional light, Environmental light and Accumulated light}
	\label{fig:lpasses}
\end{figure}

\subsection{Bounding Spheres Optimization}
When making the point light passes, it is desirable to shade only the visible fragments that reside inside the light source's volume of effect; light from emitters attenuates and contributes negligibly to distant surface points. Therefore, only points within a certain radius from the (point) light source center need to be evaluated for lighting, saving significant shading time.

A first attempt would be to render a sphere around the point light position instead of performing a screen quad pass. The problems with this simplistic approach are directly noticeable: First, the screen projection of a sphere is a two-dimensional (screen x, y) ellipsoid, preventing the culling of fragments beyond its z-extents. Second, as soon as the camera center of projection enters the light volume, the point light disappears because back face culling is enabled. Disabling back face culling would not be a viable option, as this leads to increased light when outside the sphere (because we render both faces), and to the inverse of the original effect if only front face culling is enabled (light only when inside the volume).

To bypass these problems, a trick using the stencil buffer can be used. It can be separated into two main parts:
\begin{enumerate}
	\item Mark affected fragments in the stencil buffer
	\item Render the sphere with the stencil test enabled
\end{enumerate}

In order to mark the affected fragments in the stencil buffer, we rely on the following facts about the bounding sphere as seen from the camera point of view:
\begin{enumerate}
	\item Both the bounding sphere's front and back face polygons are behind an object that is in front of it
	\item Both the bounding sphere's front and back face polygons are in front of an object that is behind it
	\item The front face polygons are in front of, but the back face polygons are behind, an object that is inside the bounding sphere
\end{enumerate}

So in order to use this with the stencil buffer, we take the following steps:
\begin{enumerate}
	\item Disable writing into the depth buffer, making it read-only
	\item Disable back face culling, in order to process all the polygons of the sphere
	\item Set the stencil test to always succeed (what we really care about is the operation)
	\item Configure the stencil operation for back facing polygons to increment the value in the stencil buffer when the depth test fails, but keep it unchanged when either the depth test or the stencil test succeeds
	\item Configure the stencil operation for front facing polygons to decrement the value in the stencil buffer when the depth test fails, but keep it unchanged when either the depth test or the stencil test succeeds
	\item Render the light sphere using null shaders and disabled color output, so that only the stencil buffer is affected
\end{enumerate}

This way, when an object is outside the bounding volume, the stencil buffer is balanced out by the decrement for the front face polygons and the increment for the back face polygons of the bounding sphere. The result is a stencil buffer that is non-zero exactly for the fragments that are affected by the light. Using this, the point light can now be rendered with the stencil test enabled, passing when the stencil value is not equal to zero.
Last but not least, front face culling must be enabled before making the point light pass; this must be done because the viewer may be inside the light volume, and if back face culling were used as usual, the light would not be visible until the viewer exits its volume.
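The steps above map to plain OpenGL roughly as in the following sketch. This is illustrative only, not the engine's actual code: \texttt{DrawLightSphere()} is a hypothetical placeholder for drawing the bounding sphere geometry, and the accumulation attachment used in the light pass is an assumption.

\begin{verbatim}
/* 1. Stencil pass: mark the fragments inside the light volume.            */
glEnable(GL_STENCIL_TEST);
glDrawBuffer(GL_NONE);             /* no color output, stencil only        */
glDepthMask(GL_FALSE);             /* depth buffer is read-only            */
glDisable(GL_CULL_FACE);           /* process front and back faces         */
glClear(GL_STENCIL_BUFFER_BIT);
glStencilFunc(GL_ALWAYS, 0, 0);    /* stencil test always succeeds         */
/* Back faces: increment on depth fail; front faces: decrement on depth
   fail; keep the value whenever the depth or stencil test succeeds.       */
glStencilOpSeparate(GL_BACK,  GL_KEEP, GL_INCR_WRAP, GL_KEEP);
glStencilOpSeparate(GL_FRONT, GL_KEEP, GL_DECR_WRAP, GL_KEEP);
DrawLightSphere();                 /* null shaders                         */

/* 2. Light pass: shade only where the stencil value is non-zero.          */
glDrawBuffer(GL_COLOR_ATTACHMENT0);/* accumulation target (assumed)        */
glStencilFunc(GL_NOTEQUAL, 0, 0xFF);
glDisable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
glCullFace(GL_FRONT);              /* viewer may be inside the volume      */
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE);       /* additive light accumulation          */
DrawLightSphere();                 /* point light shader                   */
glCullFace(GL_BACK);
glDisable(GL_BLEND);
glDisable(GL_STENCIL_TEST);
\end{verbatim}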
{ "alphanum_fraction": 0.8000850702, "avg_line_length": 67.1714285714, "ext": "tex", "hexsha": "2454e89c51fbda909b2242ca4c3d4f5524fcc611", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "880c8730cb5c271def472df76fa655df1970b94b", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ScaryBoxStudios/TheRoom", "max_forks_repo_path": "doc/optimizations.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "880c8730cb5c271def472df76fa655df1970b94b", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ScaryBoxStudios/TheRoom", "max_issues_repo_path": "doc/optimizations.tex", "max_line_length": 132, "max_stars_count": 1, "max_stars_repo_head_hexsha": "880c8730cb5c271def472df76fa655df1970b94b", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ScaryBoxStudios/TheRoom", "max_stars_repo_path": "doc/optimizations.tex", "max_stars_repo_stars_event_max_datetime": "2015-11-12T11:40:45.000Z", "max_stars_repo_stars_event_min_datetime": "2015-11-12T11:40:45.000Z", "num_tokens": 1468, "size": 7053 }
% section3 \chapter {Determinants} \section{Determinants; cofactor Expansion} \begin{exer} Compute the determinants of the matrix A: \begin{equation*} A=\begin{bmatrix}-4 & 1 & 1 & 1 & 1 & \\ 1 & -4 & 1 & 1 & 1 \\ 1 & 1 & -4 & 1 & 1 \\ 1 & 1 & 1 & -4 & 1 \\ 1 & 1 & 1 & 1 & -4\end{bmatrix}\\. \end{equation*} How can you construct $A$ brilliantly? \end{exer} \begin{sol} \begin{verbatim} A=ones(5)-5*eye(5); disp('A is'); disp(A); disp('Determinant of A is'); disp(det(A)); \end{verbatim} \begin{outputs} \begin{verbatim} A is -4 1 1 1 1 1 -4 1 1 1 1 1 -4 1 1 1 1 1 -4 1 1 1 1 1 -4 Determinant of A is -5.5511e-14 \end{verbatim} \end{outputs} \end{sol} \vspace{3mm} \begin{exer} Show that $$\det\left(\begin{bmatrix} \displaystyle a & b & c & d \\ -b & a & d & -c \\ -c & -d & a & b \\ -d & c & -b & a \end{bmatrix}\right)=(a^2+b^2+c^2+d^2)^2.$$ \end{exer} \begin{sol} \begin{verbatim} syms a b c d; A=[a b c d; -b a d -c; -c -d a b; -d c -b a]; disp('Given matrix is'); disp(A); disp('Determinant of the given matrix is'); disp(simplify(det(A))); \end{verbatim} \begin{outputs} \begin{verbatim} Given matrix is [ a, b, c, d] [ -b, a, d, -c] [ -c, -d, a, b] [ -d, c, -b, a] Determinant of the given matrix is (a^2 + b^2 + c^2 + d^2)^2 \end{verbatim} \end{outputs} \end{sol} \vspace{3mm} \begin{exer} The $n$th-order \textbf{Fibonacci matrix} [named for the Italian mathematician~(circa~1170~-~1250)] is the $n \times n$ matrix $F_{n}$ that has 1's on the main diagonal, 1's along the diagonal immediately above the main diagonal, -1's along the diagonal immediately below the main diagonal, and zeros everywhere else. Construct the sequence $$\det(F_{1}), \,\det(F_{2}), \,\det(F_{3}), \,\cdots, \det(F_{7}).$$ Make a conjecture about the relationship between a term in the sequence and its two immediate predecessors, and then use your conjecture to make a guess at $\det(F_{8})$. Check your guess by calculating this number. \end{exer} \begin{sol} \begin{verbatim} % Construct the 10x10 Fibonacci matrix F. N=10; nOnes=ones(N, 1); F=diag(nOnes)+diag(nOnes(1:N-1),1)-diag(nOnes(1:N-1),-1); for n=1:7 % n is from 1 to 7 Fn=F(1:n,1:n); % nxn Fibonacci matrix is selected from F. disp(det(Fn)); end \end{verbatim} \begin{outputs} \begin{verbatim} 1 2 3 5 8 13 21 \end{verbatim} \end{outputs} \noindent The constructed sequence satisfies the relationship $\det(F_n)=\det(F_{n-1})+\det(F_{n-2}),$ for $\det(F_1)=1$ and $\det(F_2)=2$. From that, we may guess that $\det(F_8)=34$. MATLAB gives us the same output value 34 as our guess. \end{sol} \vspace{3mm} \begin{exer} Let $A_{n}$ be the $n \times n$ matrix that has 2's along the main diagonal, 1's along the diagonals immediately above and below the main diagonal, and zeros everywhere else. Make a conjecture about the relationship between $n$ and $\det(A_{n})$. \end{exer} \begin{sol} \begin{verbatim} format rat; % Construct the 10x10 matrix A satisfying given conditions. n=10; nOnes=ones(n, 1); A=2*diag(nOnes)+diag(nOnes(1:n-1),1)+diag(nOnes(1:n-1),-1); for i=1:10 % i is from 1 to 10 Ai=A(1:i,1:i); % A_i matrix is selected from A. 
disp(det(Ai)); end \end{verbatim} \begin{outputs} \begin{verbatim} 2 3 4 5 6 7 8 9 10 11 \end{verbatim} \end{outputs} \noindent From the outputs, we make a conjecture about the relationship between $n$ and $\det(A_{n})$ as follows: $$\det(A_{n})=n+1.$$ \end{sol} \section{Properties of Determinants} \begin{exer}(\textit{Determinants with LU-decomposition}) In this problem, we find the determinant of the matrix $A$ by using the $LU$-decomposition of $A$, where \begin{displaymath} A = \left[\begin{array}{rrrr} -2& \hspace{1mm} 2& \hspace{1mm}-4& \hspace{2mm} -6\\ -3 & 6 & 3 & -15 \\ 5 & -8 & -1 & 17 \\ 1 & 1 & 11 & 7 \end{array} \right]. \end{displaymath} \vspace{1mm} \begin{enumerate} \item[(a)] Compute the determinant of $A$ directly by using the MATLAB command \textit{det} for $A$. \vspace{1mm} \item[(b)] Compute the determinant of $A$ by using the MATLAB command \textit{lu} for $A$. Confirm that you get the same results. \end{enumerate} \end{exer} \begin{sol} \begin{verbatim} %(a) A = [-2 2 -4 -6; -3 6 3 -15; 5 -8 -1 17; 1 1 11 7]; det_A = det(A); % Find the determinant of A by using the command det. disp('The determinant of A by direct use of the command det is'); disp(det_A); %(b) [L U P] = lu(A); % We have a PLU-decomposition of A. (i.e., PA=LU ). % Since the determinant of a triangular matrix is % just a product of diagonal entries, det_L = prod(diag(L)); % The product of diagonal entries of L. % Or, you may use the command det for L, directly. (i.e., det_L = det(L)). det_U = prod(diag(U)); % The product of diagonal entries of U. % Or, you may use the command det for U, directly. (i.e., det_U = det(U)). % If you observe the permutation matrix P, you can see that % P is an odd permutation. Thus, we have det(P) = -1. det_P = -1; % Or, you may use the command det for P, directly. (i.e., det_P = det(P)). % Since PA = LU, det(P)*det(A) = det(L)*det(U). det_A = det_P * det_L * det_U; disp('The determinant of A by using the LU-decomposition is'); disp(det_A); \end{verbatim} \begin{outputs} \begin{verbatim} The determinant of A by direct use of the command det is 24.0000 The determinant of A by using the PLU-decomposition is 24.0000 \end{verbatim} \end{outputs} \end{sol} \vspace{3mm} \begin{exer} (\textit{Effects of Elementary Row Operations on the Determinant}) Using the MATLAB command \textit{det}, confirm the formulas (a)-(c) in Theorem 4.2.2 of Section 4.2 for the matrix $A$ given in the problem 31 of Exercise set 4.1. \end{exer} \begin{sol} \begin{verbatim} A = [3 3 0 5; 2 2 0 -2; 4 1 -3 0; 2 10 3 2]; % (a). Multiply the second row of A by 2 and call it A2. % Initialize the matrix A2 as A. A2 = A; % Multiply the second row of A by 2. A2(2,:) = 2*A(2,:); disp('The determinant of A2 is'); disp(det(A2)); disp('2*det(A) = '); disp(2*det(A)); % (b). Interchange the rows 2 and 4 of A and call it A24. % Initialize the matrix A24 as A. A24 = A; % Interchange the rows 2 and 4 of A. A24(2, :) = A(4, :) ; A24(4, :) = A(2, :); disp('The determinant of A24 is'); disp(det(A24)); disp('-det(A) = '); disp(-det(A)); % (c). Add 2 times row 3 to row 4 of A and call it A234. % Initialize the matrix A234 as A. A234 = A; % Add 2 times row 3 of A to row 4. 
A234(4, :) = 2 * A(3, :) + A(4, :); disp('The determinant of A234 is'); disp(det(A234)); disp('det(A) = '); disp(det(A)); \end{verbatim} \begin{outputs} \begin{verbatim} The determinant of A2 is -480 2*det(A) = -480.0000 The determinant of A24 is 240.0000 -det(A) = 240.0000 The determinant of A234 is -240.0000 det(A) = -240.0000 \end{verbatim} \end{outputs} \end{sol} \vspace{3mm} \begin{exer} Use a determinant to show that if $a, b, c,$ and $d$ are not all zeros, then the vectors \begin{eqnarray*} \mathbf{v}_{1}&=&(a,\, b,\, c,\, d)\\ \mathbf{v}_{2}&=&(-b, a, d, -c)\\ \mathbf{v}_{3}&=&(-c, -d, a, b)\\ \mathbf{v}_{4}&=&(-d, c, -b, a) \end{eqnarray*} are linearly independent. \end{exer} \begin{sol} \begin{verbatim} syms a b c d; v1=[a b c d]; v2=[-b a d -c]; v3=[-c -d a b]; v4=[-d c -b a]; V=[v1; v2; v3; v4]; disp('det(V) is'); disp(simplify(det(V))); \end{verbatim} \begin{outputs} \begin{verbatim} det(V) is (a^2 + b^2 + c^2 + d^2)^2 \end{verbatim} \end{outputs} \end{sol} \section{Cramer's Rule; Formula for $A^{-1}$; Applications} No MATLAB problems in this section. \newpage \section{A First Look at Eigenvalues and Eigenvectors} \begin{exer}(\textit{Eigenvalues and Eigenvectors})\\ Use the MATLAB command \textit{eig} to find the eigenvalues and the associated \mbox{eigenvectors} of the matrix $A$, where $$A = \left[\begin{array}{rrrr} 2& \hspace{1mm}-3& \hspace{1mm} 1& \hspace{3mm} 0\\ 1 & 1 & 2 & 2\\ 3 & 0 & -1 & 4 \\ 1 & 6 & 5 & 6 \end{array} \right].$$ Display the results with long digits. \end{exer} \begin{sol} \begin{verbatim} % Construct the matrix A. A=[2 -3 1 0; 1 1 2 2; 3 0 -1 4; 1 6 5 6]; % Find the eigenvalues and eigenvectors of A by using eig. % This command gives AQ = QD. [Q D] = eig(A); lambda1 = D(1,1); lambda2 = D(2,2); lambda3 = D(3,3); lambda4 = D(4,4); % Extract each column vector as an eigenvector of A. x1 = Q(:,1); x2 = Q(:,2); x3 = Q(:,3); x4 = Q(:,4); % Display the result with long digits. format long; disp('lambda1 is'); disp(lambda1); disp('The eigenvector corresponding to lambda1 is'); disp(x1'); disp('lambda2 is'); disp(lambda2); disp('The eigenvector corresponding to lambda2 is'); disp(x2'); disp('lambda3 is'); disp(lambda3); disp('The eigenvector corresponding to lambda3 is'); disp(x3'); disp('lambda4 is'); disp(lambda4); disp('The eigenvector corresponding to lambda4 is'); disp(x4'); \end{verbatim} \begin{outputs} \begin{verbatim} lambda1 is 9.561855032395805 The eigenvector corresponding to lambda1 is -0.067716707308095 0.278176502030497 0.322465582156500 0.902246213399589 lambda2 is -3.364648937746373 The eigenvector corresponding to lambda2 is 0.275562522991092 0.197508356444458 -0.885771126913498 0.316962546342283 lambda3 is 1.802793905350564 The eigenvector corresponding to lambda3 is -0.833621905475750 -0.103812731179200 -0.147042873144503 0.522183711938150 lambda4 is -3.860931435448914e-16 The eigenvector corresponding to lambda4 is -0.705886578756789 -0.456750139195570 0.041522739926871 0.539795619049310 \end{verbatim} \end{outputs} \noindent\textit{Remark.} In fact, if we compute $\lambda_{4}$ by hand, we can obtain that $\lambda_{4}=0$. However, from the result, we see that the resulting value of $\lambda_{4}$ seems to be nonzero even though it is small enough. This is due to roundoff errors in arithmetic operations. 
Please refer to the help command of \textit{eps}, then you can see that $eps = 2.220446049250313e-016$ is floating-point relative accuracy, which means that \textit{eps} value is the allowable tolerance when we do numerical computations with rounding floating-point number off. ($i.e.$, $eps$ is an upper bound on the relative error due to rounding in floating point arithmetic.) Therefore, we can regard the resulting value of $\lambda_{4}$ as zero. \end{sol} \vspace{3mm} \begin{exer}(\textit{Eigenvalues and Eigenvectors}) Define an $n$th-order \textbf\textit{checkboard matrix} $C_{n}$ to be a matrix that has a 1 in the upper left corner and alternates between 1 and 0 along rows and columns (see the figure below). Find the eigenvalues of $C_{1}, C_{2}, \cdots$ to make a conjecture about the eigenvalues of $C_{n}$. What can you say about the eigenvalues of $C_{n}$? \begin{figure}[h]\centering \includegraphics[width=4cm]{figure.jpg} \end{figure} \end{exer} \begin{sol} \begin{verbatim} format short; n=10; % Set the size of the large check board % Construct your checkboard CheckBoard=zeros(n); CheckBoard(1:2:n, 1:2:n)=1; CheckBoard(2:2:n, 2:2:n)=1; for i=1:n Cn=CheckBoard(1:i, 1:i); [Qn Dn]=eig(Cn); % Eigenvectors and eigenvalues fprintf('The size of the checkboard is %d \n',i); disp(diag(Dn)'); end \end{verbatim} \begin{outputs} \begin{verbatim} The size of the checkboard is 1 1 The size of the checkboard is 2 1 1 The size of the checkboard is 3 0 1 2 The size of the checkboard is 4 0 0 2 2 The size of the checkboard is 5 -0.0000 -0.0000 0.0000 2.0000 3.0000 The size of the checkboard is 6 -0.0000 -0.0000 -0.0000 -0.0000 3.0000 3.0000 The size of the checkboard is 7 -0.0000 -0.0000 0.0000 0.0000 0.0000 3.0000 4.0000 The size of the checkboard is 8 -0.0000 -0.0000 -0.0000 0.0000 0.0000 0.0000 4.0000 4.0000 The size of the checkboard is 9 -0.0000 -0.0000 -0.0000 -0.0000 0 0.0000 0.0000 4.0000 5.0000 The size of the checkboard is 10 -0.0000 -0.0000 -0.0000 0 0.0000 0.0000 0.0000 0.0000 5.0000 5.0000 \end{verbatim} \end{outputs} \noindent We may conclude that the eigenvalues of $C_{n}$ are given as follows: $$\begin{cases} 1 &\text{if $n=1$,}\\ k,\, k,\, \underbrace{0,\,0,\,\cdots,\,0}_{(n-2)}&\text{if $n=2k$,}\\ k,\,k+1,\,\underbrace{0,\,0,\,\cdots,\,0}_{(n-2)}&\text{if $n=2k+1$,} \end{cases} $$ where $k$ is a positive integer. \end{sol}
{ "alphanum_fraction": 0.62866149, "avg_line_length": 28.2760869565, "ext": "tex", "hexsha": "c5a6fa27811d08fcdc103bb47c7182c22cabd9b2", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "f955eb2789b463d8cffbfbbb321bcd057d32933a", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "mireiffe/mas109_matlab_2021_2", "max_forks_repo_path": "files/intro/Learning MATLAB with Linear Algebra (Jeon, Lee)/section4.tex", "max_issues_count": 1, "max_issues_repo_head_hexsha": "f955eb2789b463d8cffbfbbb321bcd057d32933a", "max_issues_repo_issues_event_max_datetime": "2021-09-19T08:29:55.000Z", "max_issues_repo_issues_event_min_datetime": "2021-09-19T08:29:55.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "mireiffe/mas109_matlab_2021_2", "max_issues_repo_path": "_codes/intro/Learning MATLAB with Linear Algebra (Jeon, Lee)/section4.tex", "max_line_length": 672, "max_stars_count": null, "max_stars_repo_head_hexsha": "f955eb2789b463d8cffbfbbb321bcd057d32933a", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "mireiffe/mas109_matlab_2021_2", "max_stars_repo_path": "_codes/intro/Learning MATLAB with Linear Algebra (Jeon, Lee)/section4.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 4614, "size": 13007 }
\documentclass[12pt,parskip=full]{article} \usepackage{lmodern} \usepackage{amsmath} \usepackage[left=1.0in,right=1.0in,top=0.5in,bottom=1.0in]{geometry} \geometry{letterpaper} \usepackage{graphicx} \usepackage{caption} \usepackage{subcaption} \usepackage{longtable} \usepackage{float} \usepackage{wrapfig} \usepackage{soul} \usepackage{textcomp} \usepackage{marvosym} \usepackage{wasysym} \usepackage{latexsym} \usepackage{amssymb} \usepackage{apacite} \usepackage{tabu} \usepackage[svgnames]{xcolor} \usepackage{tikz} \usepackage[linktoc=all]{hyperref} \usepackage{cleveref} \usepackage{listings} \usepackage{setspace} \usepackage{parskip} \usepackage{array} \usepackage{apacite} \usepackage{natbib} \usepackage{multicol} \usepackage{subcaption} \usepackage{mathtools} \usetikzlibrary{arrows} \pgfdeclarelayer{edgelayer} \pgfdeclarelayer{nodelayer} \pgfsetlayers{edgelayer,nodelayer,main} \tikzstyle{none}=[inner sep=0pt] \tikzstyle{waypt}=[circle,fill=Black,draw=Black,scale=0.4] \tikzstyle{Helobody}=[circle,fill=White,draw=Black,scale=4.0] \tikzstyle{Tailrotor}=[circle,fill=White,draw=Black,scale=1.0] \tikzstyle{ForceVector}=[->,draw=Indigo,fill=Indigo] \tikzstyle{Coordinate}=[->,draw=Red,fill=Red,fill opacity=1.0] \tikzstyle{angle}=[->] \tikzstyle{MeasureMark}=[|-|] \newlength{\imagewidth} \newlength{\imagescale} \setlength{\parskip}{11pt} %\setlength{\parindent}{15pt} \usepackage{bookmark} \makeatletter \renewcommand\@seccntformat[1]{} \makeatother \lstset { language=Matlab, keywords={break,case,catch,continue,else,elseif,end,for,function, global,if,otherwise,persistent,return,switch,try,while}, basicstyle=\ttfamily, keywordstyle=\color{blue}, commentstyle=\color{ForestGreen}, stringstyle=\color{purple}, numbers=left, numberstyle=\tiny\color{gray}, stepnumber=1, numbersep=10pt, backgroundcolor=\color{white}, tabsize=4, showspaces=false, showstringspaces=false } \renewcommand{\thesection}{\arabic{section}} \renewcommand{\thesubsection}{\thesection\alph{subsection}} \renewcommand{\theequation}{\thesubsection\arabic{equation}} \newcommand*\circled[1]{\tikz[baseline=(char.base)]{ \node[shape=circle,draw,inner sep=1pt] (char) {#1};}} \newcolumntype{L}[1]{>{\raggedright\let\newline\\\arraybackslash\hspace{0pt}}m{#1}} \newcolumntype{C}[1]{>{\centering\let\newline\\\arraybackslash\hspace{0pt}}m{#1}} \newcolumntype{R}[1]{>{\raggedleft\let\newline\\\arraybackslash\hspace{0pt}}m{#1}} \numberwithin{subsection}{section} \begin{document} \vspace{-4ex} \title{Remote Plotting Protocol Definition\vspace{-3.5ex}} \author{Rob Rau\vspace{-4ex}} \date{\today\vspace{-4ex}} \maketitle \section{Function Data Layout} %private enum Function : byte %{ % Plot = 0, // done % Figure, // done % SetupPlot, // done % Print, // done % Xlabel, // done % Ylabel, // done % Title, // done % Subplot, // done - testing % Legend, // done % Hold, // done % Axis, // done % Grid, // done % Contour, // % Colorbar, // % Semilogx, // % Semilogy, // % Loglog // %} \subsection{plot} \begin{lstlisting} plot(x, y, ...); plot(x, y, "format", ...); \end{lstlisting} \begin{centering} \begin{tabular}{| C{2cm} |} \hline CMD \\ \hline FMT \\ \hline LINES \\ \hline $\mathrm{N_{L0}[0]}$ \\ \hline ... \\ \hline $\mathrm{N_{L0}[3]}$ \\ \hline $\mathrm{X_{L0}(0)[0]}$ \\ \hline ... \\ \hline $\mathrm{X_{L0}(0)[7]}$ \\ \hline ... \\ \hline $\mathrm{X_{L0}(N)[0]}$ \\ \hline ... \\ \hline $\mathrm{X_{L0}(N)[7]}$ \\ \hline $\mathrm{Y_{L0}(0)[0]}$ \\ \hline ...\\ \hline $\mathrm{Y_{L0}(0)[7]}$ \\ \hline \end{tabular} \begin{tabular}{| C{2cm} |} \hline ... 
\\ \hline FMT \\ \hline LINES \\ \hline $\mathrm{N_{L0}[0]}$ \\ \hline ... \\ \hline $\mathrm{N_{L0}[3]}$ \\ \hline $\mathrm{X_{L0}(0)[0]}$ \\ \hline ... \\ \hline $\mathrm{X_{L0}(0)[7]}$ \\ \hline ... \\ \hline $\mathrm{X_{L0}(N)[0]}$ \\ \hline ... \\ \hline $\mathrm{X_{L0}(N)[7]}$ \\ \hline $\mathrm{Y_{L0}(0)[0]}$ \\ \hline ...\\ \hline \end{tabular} \end{centering} \subsection{figure} \subsection{setupPlot} \subsection{print} \subsection{xlabel} \subsection{ylabel} \subsection{title} \subsection{subplot} \subsection{legend} \subsection{hold} \subsection{axis} \subsection{grid} \subsection{contour} \subsection{colorbar} \subsection{semilogx} \subsection{semilogy} \subsection{loglog} \end{document}
{ "alphanum_fraction": 0.6224849616, "avg_line_length": 22.7405660377, "ext": "tex", "hexsha": "09cdf31c287d0f6758db5b995341b03e3f447ff3", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "e42f5510960255d24c44cc842856c6649a8c1fdc", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Rob-Rau/rpp", "max_forks_repo_path": "doc/ProtocolDefinition/ProtocolDefinition.tex", "max_issues_count": 1, "max_issues_repo_head_hexsha": "e42f5510960255d24c44cc842856c6649a8c1fdc", "max_issues_repo_issues_event_max_datetime": "2016-02-21T17:05:32.000Z", "max_issues_repo_issues_event_min_datetime": "2016-02-21T17:05:32.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Rob-Rau/rpp", "max_issues_repo_path": "doc/ProtocolDefinition/ProtocolDefinition.tex", "max_line_length": 83, "max_stars_count": null, "max_stars_repo_head_hexsha": "e42f5510960255d24c44cc842856c6649a8c1fdc", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Rob-Rau/rpp", "max_stars_repo_path": "doc/ProtocolDefinition/ProtocolDefinition.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1833, "size": 4821 }
% ------------------------------------------------------------------------+ % Copyright (c) 2001 by Punch Telematix. All rights reserved. | % | % Redistribution and use in source and binary forms, with or without | % modification, are permitted provided that the following conditions | % are met: | % 1. Redistributions of source code must retain the above copyright | % notice, this list of conditions and the following disclaimer. | % 2. Redistributions in binary form must reproduce the above copyright | % notice, this list of conditions and the following disclaimer in the | % documentation and/or other materials provided with the distribution. | % 3. Neither the name of Punch Telematix nor the names of other | % contributors may be used to endorse or promote products derived | % from this software without specific prior written permission. | % | % THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED | % WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | % MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | % IN NO EVENT SHALL PUNCH TELEMATIX OR OTHER CONTRIBUTORS BE LIABLE | % FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | % CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | % SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | % BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | % WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | % OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | % IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | % ------------------------------------------------------------------------+ % % $Id: thread.tex,v 1.1.1.1 2004/07/12 14:07:44 cvs Exp $ % \subsection{Threads} \subsubsection{Thread Structure Definition} The structure definition of a thread is as follows: \bcode \begin{verbatim} 1: typedef struct x_Thread * x_thread; 2: typedef void (*x_entry)(void * argument); 3: typedef void (*x_action)(x_thread thread); 4: 5: typedef struct x_Thread { 6: x_cpu cpu; 7: w_ubyte * sp; 8: w_ubyte * b_stack; 9: w_ubyte * e_stack; 10: x_entry entry; 11: void * argument; 12: w_ushort id; 13: w_ubyte a_prio; 14: volatile w_ubyte c_prio; 15: w_ubyte c_quantums; 16: w_ubyte a_quantums; 17: volatile w_ubyte state; 18: void * xref; 19: x_flags flags; 20: volatile x_thread next; 21: volatile x_thread previous; 22: x_sleep sticks; 23: x_sleep wakeup; 24: volatile x_thread snext; 25: x_action action; 26: volatile x_thread l_waiting; 27: volatile x_monitor waiting_for; 28: volatile x_thread l_competing; 29: volatile x_event competing_for; 30: volatile x_event l_owned; 31: w_size m_count; 32: } x_Thread; \end{verbatim} \ecode The relevant fields in the thread structure are the following: \begin{itemize} \item \txt{x\_thread$\rightarrow$cpu} This is the CPU specific placeholder for things like saved stack pointer, program counter and other arguments. Please refer to the cpu section for more information on specific fields in this structure. TODO \item \txt{x\_thread$\rightarrow$s\_sp} For some ports, this is the field in which the stack pointer is saved at the moment of a thread switch. \item \txt{x\_thread$\rightarrow$b\_stack} This is the lowest address of the user supplied stack space. For the user, it is the first available byte of the memory that was allocated and passed as stack space at thread creation time. 
Note that on most processors and hosts, the stack grows downwards and this is thus, from this viewpoint, the end of the stack.

\item \txt{x\_thread$\rightarrow$e\_stack}
This is the opposite of the previous field, the end of the stack, or from the point of view of processors that have a downwards growing stack, the place where the pushing of stack items begins. Note that this value is word aligned, so it could be that it is not the end of the memory region that has been passed at thread creation time.

\item \txt{x\_thread$\rightarrow$entry}
This is the entry function for the thread. At thread start, this is the function that will be called. The thread will end when it returns from this function.

\item \txt{x\_thread$\rightarrow$argument}
This is the argument, given at thread creation time, that is passed to the function described in the previous field. See also the type definition at line 2.

\item \txt{x\_thread$\rightarrow$id}
The identification number of a thread. This is a unique, 16-bit number in \oswald.

\item \txt{x\_thread$\rightarrow$a\_prio}
The assigned priority of a thread.

\item \txt{x\_thread$\rightarrow$c\_prio}
The current priority of a thread. Note that during some operations, namely mutex and monitor operations, the thread's priority can be boosted to fight priority inversion. This field holds the thread's priority at any moment in \oswald. When the thread's priority has been boosted, this field will be reset to the \txt{a\_prio} field value after the operation.

\item \txt{x\_thread$\rightarrow$c\_quantums}
Threads execute for a number of time slices before they hand over the processor to a thread at the same priority that is ready to run. This field holds the number of slices or quantums that the thread still has left.

\item \txt{x\_thread$\rightarrow$a\_quantums}
This is the number of quantums that the \txt{c\_quantums} field will be 'reloaded' with when the thread has exhausted its number of quantums. It is fixed now, but could become an argument that can be modulated later.

\item \txt{x\_thread$\rightarrow$state}
The state that the thread is in; more information on thread states can be found below.

\item \txt{x\_thread$\rightarrow$xref}
A cross reference pointer that can be used to point back to user supplied structures. It is not used by any \oswald function.

\item \txt{x\_thread$\rightarrow$flags}
The flags that this thread has. The flags can carry \oswald information and user information. Flags are described in more detail below.

\item \txt{x\_thread$\rightarrow$next}
Linked list field. See also the next field.

\item \txt{x\_thread$\rightarrow$previous}
Threads are kept in a doubly linked circular list; each priority control block has two lists, one that keeps the ready to run threads and one that keeps the pending threads; this and the previous field form this list.

\item \txt{x\_thread$\rightarrow$sticks}
When a thread is pending, waiting for an event to happen or just sleeping, this field holds the number of ticks that it still has to go before being woken up.

\item \txt{x\_thread$\rightarrow$wakeup}
At wakeup time, \oswald saves the current time in this field; between the time of wakeup and the time that the thread starts executing again, time could have elapsed already. We need to compensate for this time in the event routines that take a timing argument.

\item \txt{x\_thread$\rightarrow$snext}
The pending threads are kept in a singly linked list that is woven through this field.

\item \txt{x\_thread$\rightarrow$action}
When a thread is pending and becomes alive again because the \txt{sticks} field became 0, this is the function pointer that is called to execute whatever housekeeping needs to be done, determined by the pending state the thread was in.
\item \txt{x\_thread$\rightarrow$l\_waiting}
When threads are waiting on monitors, this field is used to form the singly linked list of threads that are waiting on the same monitor. Note that a thread can only be waiting on a single monitor at a time.

\item \txt{x\_thread$\rightarrow$waiting\_for}
When the thread is in the waiting list of a monitor, the monitor it is waiting for is given in this field.

\item \txt{x\_thread$\rightarrow$l\_competing}
A thread can be competing for an event (mutex, monitor, queue, i.e. any of the events); the list of threads that are competing for the event is formed by this field. Again note that a thread can only be competing for a single event.

\item \txt{x\_thread$\rightarrow$competing\_for}
The event that a thread is competing for.

\item \txt{x\_thread$\rightarrow$l\_owned}
Events like mutexes and monitors are 'owned' by a thread when they are 'locked'. Threads can own multiple events at the same time. The linked list of events that are owned by a thread starts at this field, and is further formed by the \txt{event$\rightarrow$l\_owned} field.

\item \txt{x\_thread$\rightarrow$m\_count}
When a thread waits on a monitor, the number of times it has locked or entered the monitor is saved in this field, so that it can be restored later, when the thread becomes owner of the monitor again.

\end{itemize}

\subsubsection{Thread States}

The different states that a thread can be in are summarised in table \ref{table:thread_states}. A thread is only ready to be scheduled and to run when its state is 0. Any other state in table \ref{table:thread_states} indicates that the thread is waiting for a certain event to happen and is not ready to run. Note that the thread states that are not 0 (ready) and that indicate that the thread is waiting for a certain event to happen correspond to the numerical value of the event type. I.e. the event type indicator of a mutex event has value 1, for a semaphore event the value is 4, and so forth.

\footnotesize
\begin{longtable}{||l|c|p{9cm}||}
\hline
\hfill \textbf{State Name} \hfill\null & \textbf{Value} & \textbf{Meaning} \\ \hline
\endhead
\hline
\endfoot
\endlastfoot
\hline
% \begin{table}[!ht]
% \begin{center}
% \begin{tabular}{||>{\footnotesize}l<{\normalsize}|>{\footnotesize}c<{\normalsize}|>{\footnotesize}c<{\normalsize}||} \hline
% \textbf{State Name} & \textbf{Value} & \textbf{Meaning} \\ \hline
\txt{xt\_ready} & 0 &
\begin{minipage}[t]{8.5cm}
This is the state when a thread is ready to run, or is running. In \oswald, there is no special state for the currently running thread.
\end{minipage} \\
\txt{xt\_mutex} & 1 &
\begin{minipage}[t]{8.5cm}
The state of a thread that is waiting for a mutex to become available for locking.
\end{minipage} \\
\txt{xt\_queue} & 2 &
\begin{minipage}[t]{8.5cm}
The state of a thread waiting for elements to be posted on an empty queue or waiting for space to become available in a full queue.
\end{minipage} \\
\txt{xt\_mailbox} & 3 &
\begin{minipage}[t]{8.5cm}
The state of a thread waiting for data to become available in a mailbox or for the mailbox to become empty so that a message can be posted.
\end{minipage} \\
\txt{xt\_semaphore} & 4 &
\begin{minipage}[t]{8.5cm}
The state of a thread waiting for a semaphore's count to become greater than 0.
\end{minipage} \\
\txt{xt\_signals} & 5 &
\begin{minipage}[t]{8.5cm}
The state of a thread waiting on a set of signals to satisfy the get request.
\end{minipage} \\
\txt{xt\_monitor} & 6 &
\begin{minipage}[t]{8.5cm}
The state of a thread that is trying to enter a monitor.
\end{minipage} \\
\txt{xt\_block} & 7 &
\begin{minipage}[t]{8.5cm}
The state of a thread that is waiting for a free block to become available in a block pool.
\end{minipage} \\
\txt{xt\_map} & 8 &
\begin{minipage}[t]{8.5cm}
The state of a thread that is waiting for a bit to become 0 (reset) in an event bitmap.
\end{minipage} \\
\txt{xt\_waiting} & 9 &
\begin{minipage}[t]{8.5cm}
The state of a thread that is waiting on a monitor.
\end{minipage} \\
\txt{xt\_suspended} & 10 &
\begin{minipage}[t]{8.5cm}
The state of a thread that is suspended.
\end{minipage} \\
\txt{xt\_sleeping} & 11 &
\begin{minipage}[t]{8.5cm}
The state of a thread that is sleeping after calling the \txt{x\_thread\_sleep} function.
\end{minipage} \\
\txt{xt\_ended} & 13 &
\begin{minipage}[t]{8.5cm}
The state of a thread that returned from the start function.
\end{minipage} \\
\hline
\multicolumn{3}{c}{} \\
\caption{Thread states}
\label{table:thread_states}
\end{longtable}
\normalsize
% \hline
% \end{tabular}
% \caption{Thread states}
% \label{table:thread_states}
% \end{center}
% \end{table}

\subsubsection{Creating a Thread}

A thread is created by means of the call:

\txt{x\_status x\_thread\_create(x\_thread thread, x\_entry entry, void *argument, x\_ubyte *stack, x\_size size, x\_size prio, x\_flags flags);}

The arguments to this call are:

\begin{enumerate}
\item \txt{thread}, the thread structure pointer. This structure will contain the internal state of a thread.
\item \txt{entry}, the pointer to a function that the thread will start with. The type definition \txt{x\_entry} is \txt{typedef void (*x\_entry)(void * argument)}, so \txt{x\_entry} is a pointer to a function that returns void and takes a void pointer as its single argument. The thread stops and is put in an \txt{xt\_ended} state when this function returns.
\item \txt{argument}, the argument that will be passed to the entry function. Its use is defined by the programmer. \oswald will not use this argument for any reason and will just pass it along to the entry function.
\item \txt{stack}, points to a suitably sized block of memory that will be used as stack space by the thread.
\item \txt{size}, indicates the number of bytes that the \txt{stack} argument has. Note that the size must be larger than or equal to \txt{MIN\_STACK\_SIZE}.
\item \txt{prio}, is the priority of the thread. Real time priorities start from 1 and go up to 63. Soft priorities (round robin threads) start from 64 and go up to 128. The lower the value of this \txt{prio} argument, the higher the priority of the thread.
\item \txt{flags}, this argument can have only 2 different values:
\begin{itemize}
\item \txt{TF\_SUSPENDED}, indicates that the thread should not start immediately but is put in a suspended state. The thread must explicitly be started with an \txt{x\_thread\_resume} call.
\item \txt{TF\_START}, to indicate that the thread should start immediately, i.e. not in a suspended state.
\end{itemize}
\end{enumerate}

The possible return values for this call are summarized in table \ref{table:x_thread_create}.
\footnotesize \begin{longtable}{||l|p{9cm}||} \hline \hfill \textbf{Return Value} \hfill\null & \textbf{Meaning} \\ \hline \endhead \hline \endfoot \endlastfoot \hline % \begin{table}[!ht] % \begin{center} % \begin{tabular}{||>{\footnotesize}l<{\normalsize}|>{\footnotesize}c<{\normalsize}||} \hline % \textbf{Return Value} & \textbf{Meaning} \\ \hline \txt{xs\_success} & \begin{minipage}[t]{9cm} The thread was succesfully created. \end{minipage} \\ \txt{xs\_bad\_argument} & \begin{minipage}[t]{9cm} Some argument to the thread create was bad, e.g. the stack size was less than \txt{MIN\_STACK\_SIZE}, or the entry function was NULL or the flags argument was not one of the allowed values. \end{minipage} \\ \hline \multicolumn{2}{c}{} \\ \caption{Return Status for \txt{x\_thread\_create}} \label{table:x_thread_create} \end{longtable} \normalsize % \hline % \end{tabular} % \caption{Return Status for \txt{x\_thread\_create}} % \label{table:x_thread_create} % \end{center} % \end{table} \subsubsection{Suspending a Thread} There are two different calls available to suspend a thread: \txt{x\_status x\_thread\_suspend(x\_thread thread);} \\ \txt{x\_status x\_thread\_suspend\_cb(x\_thread thread, x\_ecb cb, void *arg);} Both functions will suspend a thread but the second call implements a callback facility which makes it potentially much safer to suspend a thread. The thread that is to be suspended is passed as \txt{thread} argument. A thread can call this function to suspend itself. A thread that is in a suspended state can be deleted with the \txt{x\_thread\_delete} call or resumed with the \txt{x\_thread\_resume} function. It is potentially unsafe to suspend a thread when the caller doesn't know exactly in what state a thread is. Suppose that thread A is just performing a memory allocation with the \txt{x\_mem\_alloc} call. The memory package that implements this call will lock a mutex while manipulating internal memory structures. The owner of this mutex is the thread that is performing the call, in this example thread A. If thread A was to be suspended when owning this lock, no other thread in the entire system could perform a memory allocation call or release call, potentially bringing the whole system to a virtual standstill. To give the programmer the possibility to remedy these kind of situations, the callback system is available. The \txt{x\_ecb} declares the following type of function. \txt{typedef x\_boolean (*x\_ecb)(x\_event event, void *argument);} It is a function pointer, which function takes as arguments an event pointer and an argument pointer and returns a boolean. When a thread is suspended with the callback variant, the callback function is called for each event, a mutex or a monitor, that the thread being suspended, is owner of. The \txt{argument} field of the \txt{x\_thread\_suspend\_cb} function, is passed on as the second argument to the callback function and is programmer defined. In the callback function, any actions can be undertaken under control of the programmer. She could decide to check wether the owned event is a mutex or monitor and call the relevant release function for it. When the return value of the callback, for any invocation, was not \txt{true}, the thread will \textbf{not} be suspended and the return status of the \txt{x\_thread\_suspend\_cb} call will be \txt{xs\_owner} to indicate that the thread being suspended owned at least one event. If all the invocations of the callback returned the \txt{false} value. 
the thread will be suspended, regardless of whether it still owned events or not. If the thread still owned events, the return status will also be \txt{xs\_owner}. When the thread didn't own any events anymore, it returns \txt{xs\_success}. For a full overview of the return values of either suspend call, refer to table \ref{table:x_thread_suspend}.

\footnotesize
\begin{longtable}{||l|p{9cm}||}
\hline \hfill \textbf{Return Value} \hfill\null & \textbf{Meaning} \\ \hline
\endhead
\hline
\endfoot
\endlastfoot
\hline
% \begin{table}[!ht]
% \begin{center}
% \begin{tabular}{||>{\footnotesize}l<{\normalsize}|>{\footnotesize}c<{\normalsize}||} \hline
% \textbf{Return Value} & \textbf{Meaning} \\ \hline
\txt{xs\_success} & \begin{minipage}[t]{9cm} The thread was successfully suspended and doesn't own any events anymore. \end{minipage} \\
\txt{xs\_no\_instance} & \begin{minipage}[t]{9cm} The thread being suspended was either in an \txt{xt\_ended} state or already in an \txt{xt\_suspended} state. \end{minipage} \\
\txt{xs\_owner} & \begin{minipage}[t]{9cm} This indicates that the thread still owns events. Whether the thread was really suspended depends on the context. When the suspension was tried with the non-callback variant, the thread is suspended. When the suspension was performed with the callback variant, it depends on the callback results: if the callback returned at least a single \txt{false} value, the thread will not be suspended; if the callback always returned \txt{true}, the thread will be suspended but still owns events, i.e. the callback did not succeed in releasing the owned events. \end{minipage} \\
\hline
\multicolumn{2}{c}{} \\
\caption{Return Status for \txt{x\_thread\_suspend}}
\label{table:x_thread_suspend}
\end{longtable}
\normalsize
% \hline
% \end{tabular}
% \caption{Return Status for \txt{x\_thread\_suspend}}
% \label{table:x_thread_suspend}
% \end{center}
% \end{table}

\subsubsection{Resuming a Thread}

A thread can be resumed with the call:

\txt{x\_status x\_thread\_resume(x\_thread thread);}

The \txt{thread} argument to the call is the thread that needs resuming. The return values of this call are simple and summarised in table \ref{table:x_thread_resume}.

\footnotesize
\begin{longtable}{||l|p{9cm}||}
\hline \hfill \textbf{Return Value} \hfill\null & \textbf{Meaning} \\ \hline
\endhead
\hline
\endfoot
\endlastfoot
\hline
% \begin{table}[h]
% \begin{center}
% \begin{tabular}{||>{\footnotesize}l<{\normalsize}|>{\footnotesize}c<{\normalsize}||} \hline
% \textbf{Return Value} & \textbf{Meaning} \\ \hline
\txt{xs\_success} & \begin{minipage}[t]{9cm} The thread was successfully resumed. \end{minipage} \\
\txt{xs\_no\_instance} & \begin{minipage}[t]{9cm} The thread being resumed is not in a suspended state, i.e. there was no preceding \txt{x\_thread\_suspend} call for this thread. \end{minipage} \\
\hline
\multicolumn{2}{c}{} \\
\caption{Return Status for \txt{x\_thread\_resume}}
\label{table:x_thread_resume}
\end{longtable}
\normalsize
% \hline
% \end{tabular}
% \caption{Return Status for \txt{x\_thread\_resume}}
% \label{table:x_thread_resume}
% \end{center}
% \end{table}

\subsubsection{Making a Thread Sleep \& Wake up}

A thread can go to sleep for a certain number of ticks with the following call:

\txt{void x\_thread\_sleep(x\_sleep ticks);}

Note that a \txt{ticks} argument of 0 has no effect. A thread can be made to sleep for an eternal amount of time by giving the \txt{ticks} argument the value \txt{x\_eternal}.
A thread that has been put to sleep, for an eternal time or a limited time, can be woken up with the following call:

\txt{x\_status x\_thread\_wakeup(x\_thread thread);}

When the thread was not sleeping, the status returned is \txt{xs\_no\_instance}; otherwise, \txt{xs\_success} is returned.

\subsubsection{Deleting a Thread}

A thread can only be deleted when its state is either \txt{xt\_ended}, because it returned from the entry function, or when its state is \txt{xt\_suspended}, because of one of the \txt{x\_thread\_suspend} calls.

\subsubsection{Changing Thread Priority}

A thread's priority can be changed with the call:

\txt{x\_status x\_thread\_priority\_set(x\_thread thread, x\_size newprio);}

This function will return \txt{xs\_success} when the thread did change priority or \txt{xs\_bad\_argument} if the \txt{newprio} argument was out of bounds.

\subsubsection{Identifying the Current Thread}

The \txt{x\_thread} pointer of the currently running thread can be found with the following call:

\txt{x\_thread x\_thread\_current(void);}

The 16-bit unique ID of a thread is found in the \txt{x\_thread$\rightarrow$id} field of the \txt{x\_thread} structure. Note that this ID is unique and is recycled. This means that the thread ID does not convey any information about the start order of threads; e.g. a thread with ID 128 could have been created after the thread with ID 130.
% !TEX root = ../notes_template.tex \chapter{Quantum Mechanics}\label{chp:quantum_mechanics} \gls{hamiltonian}; \gls{qft}; \glsxtrshort{qm}; \gls{lagrangian}
\documentclass{report}

\newcommand{\docTitle}{User guide for using the python framework to generate models in FreeCAD}
\newcommand{\docAuthor}{A.~F.~D.~Morgan}

\usepackage{graphicx}
\usepackage{multirow}
\usepackage{a4wide}
\usepackage{fancyhdr}
\usepackage{amsfonts}
\usepackage{amsmath}
\usepackage{listings}
\usepackage{color}
\usepackage[colorlinks=true, linkcolor=blue, pdfauthor=\docAuthor, pdftitle=\docTitle]{hyperref}

\definecolor{codegreen}{rgb}{0,0.6,0}
\definecolor{codegray}{rgb}{0.5,0.5,0.5}
\definecolor{codepurple}{rgb}{0.58,0,0.82}
\definecolor{backcolour}{rgb}{0.95,0.95,0.92}

\lstdefinestyle{mystyle}{
    backgroundcolor=\color{backcolour},
    commentstyle=\color{codegreen},
    keywordstyle=\color{magenta},
    numberstyle=\tiny\color{codegray},
    stringstyle=\color{codepurple},
    basicstyle=\footnotesize,
    breakatwhitespace=false,
    breaklines=true,
    captionpos=b,
    keepspaces=true,
    numbers=left,
    numbersep=5pt,
    showspaces=false,
    showstringspaces=false,
    showtabs=false,
    tabsize=2
}
\lstset{style=mystyle}

\setlength{\headheight}{15.2pt}
\pagestyle{fancy}
\newcommand\abs[1]{\left|#1\right|}
\rhead{\docTitle}
\rfoot{\docAuthor}
\lhead{}

\begin{document}
\title{\bf{\docTitle}}
\author{\docAuthor}
\maketitle
\tableofcontents

\chapter{Introduction}
This framework is designed to make the programmatic control of FreeCAD easier, with the aim of using the generated STL files as an input to EM modelling software. Geometries are defined in python and can be parameterised, allowing for parameter sweeps. The parameters used to generate a particular model are also stored, which aids later analysis. One of the advantages of this approach is that the models can be visualised and errors quickly identified \emph{before} being passed on to EM simulation, thus saving time overall.

\chapter{Using an existing file}
Use the python interpreter built into FreeCAD. The command
\begin{verbatim}
python pillbox_cavity.py C:\temp
\end{verbatim}
will run the code which generates the pillbox cavity models as defined in \verb|pillbox_cavity.py| and places the results in \verb|C:\temp\pillbox_cavity|. In this folder there will be a sub folder containing the base model, as well as sub folders containing additional models which form part of user-defined parameter sweeps (also defined in \verb|pillbox_cavity.py|). Inside each of these sub folders is a native FreeCAD file containing the model, together with a text file describing the parameter settings used to generate this model. There is also a folder called binary and one named ascii. These contain STL files of the various components defined in \verb|pillbox_cavity.py|. Usually a separate component is defined if it will be a different material in the model.

\chapter{Creating a new file}
\label{chap:Creating a new file}
This is an example of a simple input file (\verb|pillbox_cavity.py|). All model files share the same basic structure; however, things like the imports, input parameters, and parts directory will likely need adjustment, in addition to the model definition section itself.

\begin{lstlisting}[language=Python]
from freecad_elements import make_beampipe, make_circular_aperture, ModelException, parameter_sweep, base_model
from sys import argv
import os

# baseline model parameters
INPUT_PARAMETERS = {'cavity_radius': 20,
                    'cavity_length': 20,
                    'pipe_radius': 10,
                    'pipe_length': 80}

# The model name is the name of the file.
# The output path is a user defined location.
MODEL_NAME, OUTPUT_PATH = argv


def pillbox_cavity_model(input_parameters):
    """ Generates the geometry for the pillbox cavity in FreeCAD.
        Also writes out the geometry as STL files and writes a "sidecar" text file
        containing the input parameters used.

        Args:
            input_parameters (dict): Dictionary of input parameter names and values
    """
    try:
        # The model is defined in this section. Different components can be defined
        # and later added to the parts dictionary.
        wire1, face1 = make_circular_aperture(input_parameters['pipe_radius'])
        wire2, face2 = make_circular_aperture(input_parameters['cavity_radius'])
        beampipe1 = make_beampipe(face1, input_parameters['pipe_length'],
                                  (-input_parameters['pipe_length'] / 2. - input_parameters['cavity_length'] / 2., 0, 0))
        beampipe3 = make_beampipe(face1, input_parameters['pipe_length'],
                                  (input_parameters['pipe_length'] / 2. + input_parameters['cavity_length'] / 2., 0, 0))
        beampipe2 = make_beampipe(face2, input_parameters['cavity_length'])
        fin1 = beampipe1.fuse(beampipe2)
        fin2 = fin1.fuse(beampipe3)
    except Exception as e:
        # This allows errors in the model to be separated from other code errors.
        raise ModelException(e)
    # An entry in the parts dictionary corresponds to an STL file.
    # This is useful for parts of differing materials.
    parts = {'all': fin2}
    return parts, os.path.splitext(os.path.basename(MODEL_NAME))[0]


# Generate the base model.
base_model(pillbox_cavity_model, INPUT_PARAMETERS, OUTPUT_PATH, accuracy=10)
# Generate additional models to form a parameter sweep.
parameter_sweep(pillbox_cavity_model, INPUT_PARAMETERS, OUTPUT_PATH, 'cavity_radius', [10, 30, 40, 50])
\end{lstlisting}

\chapter{Helper functions}
In \verb|freecad_elements.py| various helper functions are defined. These are mainly to do with creating beam pipes with various shapes, and tapers between various apertures. Their basic use can be seen in the example code in \autoref{chap:Creating a new file}.

\end{document}
{% raw %} \documentclass{vgtc} % final (conference style) %\documentclass[review]{vgtc} % review %\documentclass[widereview]{vgtc} % wide-spaced review %\documentclass[preprint]{vgtc} % preprint %\documentclass[electronic]{vgtc} % electronic version \ifpdf% % if we use pdflatex \pdfoutput=1\relax % create PDFs from pdfLaTeX \pdfcompresslevel=9 % PDF Compression \pdfoptionpdfminorversion=7 % create PDF 1.7 \ExecuteOptions{pdftex} \usepackage{graphicx} % allow us to embed graphics files \DeclareGraphicsExtensions{.pdf,.png,.jpg,.jpeg} % for pdflatex we expect .pdf, .png, or .jpg files \else% % else we use pure latex \ExecuteOptions{dvips} \usepackage{graphicx} % allow us to embed graphics files \DeclareGraphicsExtensions{.eps} % for pure latex we expect eps files \fi% %% it is recomended to use ``\autoref{sec:bla}'' instead of ``Fig.~\ref{sec:bla}'' \graphicspath{{figs/}} % where to search for the images \usepackage{microtype} % use micro-typography (slightly more compact, better to read) \PassOptionsToPackage{warn}{textcomp} % to address font issues with \textrightarrow \usepackage{textcomp} % use better special symbols \usepackage{mathptmx} % use matching math font \usepackage{times} % we use Times as the main font \renewcommand*\ttdefault{txtt} % a nicer typewriter font \usepackage{cite} % needed to automatically sort the references \usepackage{tabu} % only used for the table example \usepackage{booktabs} % only used for the table example \onlineid{0} \vgtccategory{Research} \vgtcinsertpkg %\preprinttext{To appear in an IEEE VGTC sponsored conference.} %% Paper title. \title{title} %% This is how authors are specified in the conference style %% Author and Affiliation (single author). %%\author{Roy G. Biv\thanks{e-mail: [email protected]}} %%\affiliation{\scriptsize Allied Widgets Research} %% Author and Affiliation (multiple authors with single affiliations). %%\author{Roy G. Biv\thanks{e-mail: [email protected]} % %%\and Ed Grimley\thanks{e-mail:[email protected]} % %%\and Martha Stewart\thanks{e-mail:[email protected]}} %%\affiliation{\scriptsize Martha Stewart Enterprises \\ Microsoft Research} %% Author and Affiliation (multiple authors with multiple affiliations) \author{Author1 \thanks{e-mail: [email protected]}\\ \scriptsize Author1 Affiliation % \and Author2 \thanks{e-mail: [email protected]}\\ \scriptsize Author2 Affiliation} % \teaser{ % \includegraphics[width=17cm]{teaser.pdf} % \caption{teaser image} % } \abstract{ abstract section. } %% ACM Computing Classification System (CCS). %% See <http://www.acm.org/about/class> for details. %% We recommend the 2012 system <http://www.acm.org/about/class/class/2012> %% For the 2012 system use the ``\CCScatTwelve'' which command takes four arguments. %% The 1998 system <http://www.acm.org/about/class/class/2012> is still possible %% For the 1998 system use the ``\CCScat'' which command takes four arguments. %% In both cases the last two arguments (1998) or last three (2012) can be empty. \CCScatlist{ \CCScatTwelve{Human-centered computing}{Visu\-al\-iza\-tion}{Visu\-al\-iza\-tion techniques}{Treemaps}; \CCScatTwelve{Human-centered computing}{Visu\-al\-iza\-tion}{Visualization design and evaluation methods}{} } %% Copyright space is enabled by default as required by guidelines. 
%% It is disabled by the 'review' option or via the following command: % \nocopyrightspace %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%% START OF THE PAPER %%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{document} \firstsection{Introduction} \maketitle Introduction section. \section{Related Work} Related work section. \acknowledgments{ acknowledgements } \bibliographystyle{abbrv-doi} \bibliography{reference} \end{document} {% endraw %}
\chapter{Geometric Parameters}

\section{Wing Basic Geometric Parameters}

\begin{figure}
  \centering
  \includegraphics[width=120mm]{eps/wing_geometric_parameters.eps}
  \caption{Wing basic geometric parameters}
\end{figure}

The aspect ratio is given by the following formula: \cite{Raymer1992}
\begin{equation}
  A = \frac{b^2}{S}
\end{equation}

The taper ratio is given by the following formula: \cite{Raymer1992}
\begin{equation}
  \lambda = \frac{c_t}{c_r}
\end{equation}

\section{Mean Aerodynamic Chord}

\begin{figure}
  \centering
  \includegraphics[width=120mm]{eps/wing_mean_aerodynamic_chord.eps}
  \caption{Mean aerodynamic chord}
\end{figure}

For a tapered wing, the mean aerodynamic chord can be calculated using the following formula: \cite{Corke2003, Galinski2016}
\begin{equation}
  \hat c = \frac{2}{3} c_r \frac{1+\lambda+\lambda^2}{1+\lambda}
\end{equation}

For more complex shapes, the mean aerodynamic chord is given as follows: \cite{Paturski02}
\begin{equation}
  \hat c = \left( \int_{-\frac{b}{2}}^{\frac{b}{2}} \left( c \left( y \right) \right)^2 dy \right) \div \left( \int_{-\frac{b}{2}}^{\frac{b}{2}} \left( c \left( y \right) \right) dy \right)
\end{equation}

\section{Wing Aerodynamic Center}

\begin{figure}
  \centering
  \includegraphics[width=120mm]{eps/wing_aerodynamic_center.eps}
  \caption{Wing aerodynamic center}
\end{figure}

The position of the wing aerodynamic center ${\vec r}_{AC}$ is at 25\% of the mean aerodynamic chord, and its lateral coordinate is given by the following formula: \cite{Raymer1992, Corke2003, Galinski2016, Torenbeek1982}
\begin{equation}
  y_{AC} = \frac{ b \left( 1 + 2 \lambda \right) }{ 6 \left( 1 + \lambda \right) }
\end{equation}
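As a brief worked example combining the above formulas (dimensions chosen here purely for illustration), consider a linearly tapered wing with $c_r = 2\,\mathrm{m}$, $c_t = 1\,\mathrm{m}$ and $b = 12\,\mathrm{m}$. The taper ratio is $\lambda = c_t/c_r = 0.5$ and the trapezoidal planform area is $S = b \left( c_r + c_t \right)/2 = 18\,\mathrm{m^2}$, so the aspect ratio is $A = b^2/S = 8$. The mean aerodynamic chord follows as $\hat c = \frac{2}{3} \cdot 2 \cdot \frac{1 + 0.5 + 0.25}{1 + 0.5} \approx 1.56\,\mathrm{m}$, and the lateral coordinate of the aerodynamic center is $y_{AC} = 12 \left( 1 + 2 \cdot 0.5 \right) / \left( 6 \cdot 1.5 \right) \approx 2.67\,\mathrm{m}$.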
\chapter{Modelling renewable energy resources} \label{methods_physical} \vspace{-15pt} % -15pts for two-line heading, -45pts for single-line heading \begin{tcolorbox}[enhanced,width=\textwidth,size=fbox, sharp corners,colframe=black!5!white,drop fuzzy shadow southeast, boxrule=3mm, parbox=false] This chapter borrows from the articles \citep{walch_big_2020,walch_quantifying_2021}: \qquad %\bibentry{walch_big_2020} A. Walch, R. Castello, N. Mohajeri, and J.-L. Scartezzini (2020). Big data mining for the estimation of hourly rooftop photovoltaic potential and its uncertainty. \textit{Applied Energy}, 262:114404. \href{https://doi.org/10.1016/j.apenergy.2019.114404}{doi:10.1016/j.apenergy.2019.114404} \qquad %\bibentry{walch_quantifying_2021} A. Walch, N. Mohajeri, A. Gudmundsson, and J.-L. Scartezzini (2021). Quantifying the technical geothermal potential from shallow borehole heat exchangers at regional scale. \textit{Renewable Energy}, 165:369–380. \href{https://doi.org/10.1016/j.renene.2020.11.019}{doi:10.1016/j.renene.2020.11.019} Parts of Section~\ref{method_solar_hierarchy} overlap with the conference proceedings \cite{walch_spatio-temporal_2019,walch_critical_2019}. % \qquad \bibentry{walch_spatio-temporal_2019} % \qquad \bibentry{walch_critical_2019} \end{tcolorbox} Spatio-temporal modelling of renewable energy potentials integrates physical models with geospatial operations using Geographic Information Systems (GIS). This chapter introduces state-of-the-art modelling approaches for the two types of renewables addressed in this thesis, namely rooftop solar energy (Section \ref{method_solar}) and shallow geothermal energy (Section \ref{geo_method}). Relevant physical, geographical and technical constraints for both energy resources and popular modelling approaches are summarized to justify the methods used in the large-scale estimation of solar potential (Chapter \ref{solar}) and shallow geothermal potential (Chapter \ref{geothermal}). \section{Rooftop solar energy} \label{method_solar} Rooftop solar energy is defined as the electrical or thermal energy harvested from installations on building rooftops, using solar photovoltaic (PV) panels or solar thermal collectors (STC), respectively. While I focus on the potential estimation for solar PV, which is currently the most frequently deployed solar technology on rooftops \cite{kaufmann_schweizerische_2019}, most parts of the methodology % (except the technological model of the solar PV panels) are transferable to STC. This section reviews state-of-the-art approaches to model the hourly electricity yield of rooftop-mounted solar PV systems in the context of large-scale potential studies (Section~\ref{method_solar_hierarchy}), and details the empirical models (Section~\ref{phys_models}) and geospatial techniques (Section~\ref{GIS_methods}) used in this work. I do not claim to provide a complete review of existing modelling approaches, but rather give an overview of the relevant modelling steps and outline popular computational methods for each step. \subsection{Hierarchical approach for PV potential estimation} \label{method_solar_hierarchy} \begin{figure}[b] \centering \includegraphics[width=\linewidth]{images/Figs/hierarchy.png} \caption{Hierarchical approach for estimating rooftop PV potential. 
Source: \citet{assouline_estimation_2017}} \label{fig:solar_hierarchy} \end{figure} To compute the technical rooftop PV potential, a hierarchical approach as shown in Fig.~\ref{fig:solar_hierarchy} has been widely accepted in the literature \cite{assouline_quantifying_2017,ramirez_camargo_spatio-temporal_2015,izquierdo_method_2008,wiginton_quantifying_2010}. It includes (i) the \textit{physical potential}, that is, the amount of solar energy reaching the earth’s surface, (ii) the \textit{geographic potential}, that is, the amount of solar radiation received by the tilted PV panels, which is affected by the geometry of the panel (slope and aspect), the shading from surrounding buildings and trees and the suitable area for the panel installation, and (iii) the \textit{technical potential}, that is, the maximal electricity output considering the technical characteristics of the PV technology (e.g. efficiency and system performance). After obtaining the technically maximal electricity output, one may consider additional environmental, economic or social aspects to refine the design of the system. A fourth step may thus be an \textit{economic potential} focused on sizing the systems under given constraints. Economic considerations such as technology cost are not taken into account in most large-scale studies. Instead, these are typically assessed in separate studies that use the technical PV potential as input, in smaller-scale case studies as well as in hybrid potential assessments. An analysis of the \textit{market potential} is far beyond the scope of this work. \textbf{Physical potential.} The incoming solar radiation (also referred to as irradiance, in W/m$^2$), consists of three components: (i) a direct or beam component ($G_B$), which describes the direct incoming solar radiation on a horizontal plane, (ii) a diffuse component ($G_D$), which is diffracted for example through the atmosphere and through cloud coverage, and (iii) a surface reflected component ($G_R$), which is driven by the ground surface reflectance, also known as albedo ($\rho$), which can be omitted for horizontal planes \cite{assouline_estimation_2017}. Omitting $G_R$, the global horizontal solar radiation $G_h$ is given by \cite{assouline_estimation_2017}: \begin{equation} \label{eq:Gh_method} G_{h} = G_{B} + G_{D} \end{equation} While most large-scale studies of RPV potential use satellite data or station measurements to obtain $G_h, G_B, G_D$ (e.g. \cite{bodis_high-resolution_2019,buffat_scalable_2018,ramirez_camargo_spatio-temporal_2015,calcabrini_simplified_2019}), empirical models exist to derive these from related variables such as the sunshine duration and the extra-terrestrial solar radiation \cite{assouline_estimation_2017}. In other cases, (geo)-statistical methods have been developed to estimate solar radiation. These include averaging the nearest neighbours \cite{klauser_solarpotentialanalyse_2016}, geostatistical methods such as kriging \cite{alsamamra_comparative_2009,rehman_spatial_2000} as well as Machine Learning approaches such as Support Vector Machines \cite{assouline_quantifying_2017}, Random Forests \cite{assouline_large-scale_2018} and Neural Networks \cite{hocaoglu_hourly_2008,notton_neural_2013,sahin_application_2013}. 
As averaging tends to oversimplify the modelling, and kriging is costly and requires the modelling of anisotropic spatial correlations and stationarity of the process, data-driven ML algorithms have recently gained much attention due to their performance and speed \cite{kanevski_machine_2009}. Most models do not estimate the uncertainty related to modelling horizontal radiation, which is an important aspect if the results are processed further. A further review of different modelling techniques for horizontal solar radiation is provided in \cite{zhang_critical_2017}.

\textbf{Geographic potential.} The solar power received by RPV panels is driven by two factors: (i) the incoming solar radiation on the tilted panels, and (ii) the available area for installing PV, sometimes also quantified as the number of panels that can be installed on a rooftop. Like the global horizontal radiation, the tilted radiation ($G_t$) is composed of a direct ($G_{Bt}$), a diffuse ($G_{Dt}$) and a reflected ($G_{Rt}$) component, such that \cite{assouline_estimation_2017}:
\begin{equation}
\label{eq:POA}
G_{t} = G_{Bt} + G_{Dt} + G_{Rt}
\end{equation}
The three components in Eq.~\eqref{eq:POA} describe the plane-of-array (POA) radiation, obtained from empirical models (Section~\ref{phys_models}), which represents the radiation received by a fully unshaded PV panel in an open space. In urban environments, however, direct shading and a reduced sky visibility from surrounding buildings or other built-up objects, referred to as the sky view factor (SVF), can significantly impact the electricity yield of a PV panel. Hence, many studies of RPV potential use geospatial techniques based on 2D or 3D models of the urban environment, detailed in Section~\ref{GIS_methods}, to quantify shading effects and the SVF (e.g.~\cite{desthieux_solar_2018,calcabrini_simplified_2019,wegertseder_combining_2016,jakubiec_method_2013}). As these are included across the literature as part of different factors, partially due to the different spatial and temporal resolutions of RPV studies, I do not provide a generalised formulation to include these factors in Eq.~\eqref{eq:POA}. However, in Chapter~\ref{solar_comparison} I will attempt to formulate such a generalisation for studies carried out in Switzerland.

The available area for installing PV or STC ($A_{PV}$) is potentially the factor that varies most between different studies of RPV potential, as it is strongly dependent on the scale of the study and on the available rooftop data. Rooftop available area is either quantified using constant factors (e.g. \cite{iea_potential_2002,wegertseder_combining_2016,portmann_sonnendach.ch:_2016}) or derived from building data (\cite{ramirez_camargo_spatio-temporal_2015,assouline_quantifying_2017,hong_development_2017}) and aerial imagery \cite{mainzer_assessment_2017} using geospatial techniques or image processing. As the required geospatial input data may not be available across entire study areas and the computational time for these methods may be prohibitively high, sampling techniques \cite{izquierdo_method_2008}, the use of building prototypes \cite{wegertseder_combining_2016} and extrapolation using ML \cite{assouline_quantifying_2017,assouline_large-scale_2018} based on geospatial methods are mostly used in large-scale studies.
A review of statistical and geospatial techniques for quantifying the available roof area is provided in Section~\ref{GIS_methods}, while constant factors are compared in \cite{assouline_estimation_2017,wiginton_quantifying_2010,singh_estimation_2015}. \textbf{Technical potential.} The conversion of tilted solar radiation to PV electricity ($E_{PV}$) is driven by three factors: (i) the tilted radiation ($G_t$), (ii) the available roof area ($A_{PV}$), and (iii) the PV system efficiency. The latter includes the efficiency of the PV panel ($\eta_{PV}$), the inverter efficiency ($\eta_\mathit{inv}$), and other losses ($\eta_\mathit{losses}$) such as panel soiling, degradation, network and wiring losses, partial shading of panels and other factors. The $E_{PV}$ is thus computed as: \begin{equation} E_{PV} = G_t \times A_{PV} \times \eta_{PV} \times \eta_\mathit{inv} \times \eta_\mathit{losses} \end{equation} In some studies, $\eta_\mathit{inv}$ and $\eta_\mathit{losses}$ are combined to the system's performance factor ($\mathit{PF} = \eta_\mathit{inv} \times \eta_\mathit{losses}$) \cite{assouline_large-scale_2018,assouline_quantifying_2017,klauser_solarpotentialanalyse_2016}. While several studies use constant values for panel and inverter efficiencies \cite{wegertseder_combining_2016,romero_rodriguez_assessment_2017,ordonez_analysis_2010,hong_development_2017}, studies at hourly resolution often use analytical or empirical equations to quantify the efficiencies as a function of radiation, temperature, wind speed and technical characteristics of PV systems. These equations range from analytical models of individual cells within a PV panel \cite{buffat_scalable_2018} to coefficient-based empirical models \cite{mainzer_assessment_2017} (see Section~\ref{phys_models}). Other losses are accounted for as constant loss factors in most studies. An overview of the parameters used to estimate the physical, geographic and technical PV potential and a summary of methods to compute these is provided in Table~\ref{tab:solar_methods_compare}. 
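To fix orders of magnitude (the values here are purely illustrative, but correspond to the typical panel and system parameters adopted later in this work): for a tilted radiation of $G_t = 1000$~W/m$^2$ on $A_{PV} = 20$~m$^2$ of available roof area, with $\eta_{PV} = 0.17$, $\eta_\mathit{inv} = 0.96$ and $\eta_\mathit{losses} = 0.86$, the above relation yields an instantaneous output of $E_{PV} \approx 2.8$~kW.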
\begin{table}[htbp] \centering \footnotesize \caption[Parameters for estimating the physical, geographic and technical solar energy potential]{Parameters for estimating the physical, geographic and technical potential and a (non-exhaustive) review of methods to compute these parameters.} \label{tab:solar_methods_compare} \resizebox{\textwidth}{!}{% \begin{tabular}{llllll} \hline \textbf{Type} & \textbf{Parameters} & \textbf{Unit} & \textbf{Description} & \textbf{Methods (selected)} & \textbf{References} \\ \hline \multirow{4}{*}{\begin{tabular}[c]{@{}l@{}}\textit{Physical}\\ \textit{potential}\end{tabular}} & \multirow{4}{*}{$G_h, G_B, G_D$} & \multirow{4}{*}{W/m$^2$} & \multirow{4}{*}{\begin{tabular}[c]{@{}l@{}}Global, direct, diffuse\\ horizontal solar radiation\end{tabular}} & Satellite / measured data & \cite{bodis_high-resolution_2019,buffat_scalable_2018,ramirez_camargo_spatio-temporal_2015,calcabrini_simplified_2019} \\ & & & & Empirical models & \cite{assouline_estimation_2017} \\ & & & & Kriging & \cite{alsamamra_comparative_2009,rehman_spatial_2000} \\ & & & & Machine Learning & \cite{assouline_quantifying_2017,assouline_large-scale_2018,hocaoglu_hourly_2008,notton_neural_2013,sahin_application_2013} \\ \hline \multirow{11}{*}{\begin{tabular}[c]{@{}l@{}}\textit{Geographic}\\ \textit{potential}\end{tabular}} & $G_t$ & \multirow{4}{*}{W/m$^2$} & Tilted (POA) radiation & $G_t = G_{Bt} + G_{Dt} + G_{Rt}$ & \\ & $G_{Bt}, G_{Rt}$ & & Direct, reflected component & Geometric projection & \cite{gulin_estimation_2013} \\ & \multirow{2}{*}{$G_{Dt}$} & & \multirow{2}{*}{Diffuse component} & Perez model & \cite{perez_modeling_1990, buffat_scalable_2018,jakubiec_method_2013,mainzer_assessment_2017,wegertseder_combining_2016} \\ & & & & Other (Hay, Liu-Jordan, Klein) & \cite{desthieux_solar_2018,izquierdo_method_2008,singh_estimation_2015, assouline_estimation_2017} \\ \cline{2-6} & \multirow{3}{*}{\begin{tabular}[c]{@{}l@{}}SVF / \\ Shading\end{tabular}} & \multirow{3}{*}{-} & \multirow{3}{*}{\begin{tabular}[c]{@{}l@{}}Sky view factor /\\ Shaded roof area\end{tabular}} & Fisheye images & \cite{calcabrini_simplified_2019}\\ & & & & Digital surface model/LiDAR & \cite{jakubiec_method_2013,desthieux_solar_2018,hong_development_2017,strzalka_large_2012} \\ & & & & Roof fraction (constant) & \cite{mainzer_assessment_2017,wiginton_quantifying_2010, izquierdo_method_2008} \\ \cline{2-6} & \multirow{4}{*}{$A_{PV}$} & \multirow{4}{*}{m$^2$} & \multirow{4}{*}{Available roof area} & Entire roof area & \cite{ramirez_camargo_spatio-temporal_2015,buffat_scalable_2018,hong_development_2017} \\ & & & & Roof fraction (constant) & \cite{iea_potential_2002,wegertseder_combining_2016,portmann_sonnendach.ch:_2016,wiginton_quantifying_2010} \\ & & & & Derived from geospatial data & \cite{assouline_large-scale_2018,ordonez_analysis_2010, mainzer_assessment_2017} \\ & & & & Extrapolation to the large scale & \cite{izquierdo_method_2008, wegertseder_combining_2016,assouline_quantifying_2017} \\ \hline \multirow{4}{*}{\begin{tabular}[c]{@{}l@{}}\textit{Technical}\\ \textit{potential}\end{tabular}} & \multirow{2}{*}{$\eta_{PV}, \eta_{\mathit{inv}}$} & \multirow{2}{*}{-} & \multirow{2}{*}{\begin{tabular}[c]{@{}l@{}}PV panel, inverter \\ efficiency\end{tabular}} & Empirical model & \cite{jakubiec_method_2013,ramirez_camargo_spatio-temporal_2015,lukac_buildings_2014,buffat_scalable_2018,mainzer_assessment_2017,calcabrini_simplified_2019} \\ & & & & Constant value & 
\cite{wegertseder_combining_2016,romero_rodriguez_assessment_2017,ordonez_analysis_2010,assouline_quantifying_2017,hong_development_2017,bodis_high-resolution_2019} \\ & $\eta_{\mathit{losses}}$ & - & \begin{tabular}[c]{@{}l@{}}System losses (Soiling, wiring,\\ degradation etc.)\end{tabular} & Constant value & \cite{klauser_solarpotentialanalyse_2016,buffat_scalable_2018,mainzer_assessment_2017,lorenz_regional_2011,bodis_high-resolution_2019} \\ \hline \end{tabular} } \end{table} \subsection{Empirical models} \label{phys_models} Empirical models are widely used in the modelling of the electricity yield of RPV systems, as they allow to estimate relevant physical variables with little computational effort and from widely available data. In particular, empirical models are used in the literature (i) to quantify the POA radiation, namely the tilted radiation components ($G_{Bt}, G_{Dt}, G_{Rt}$) incident to the inclined PV panels, and (ii) to model solar PV technology, more specifically the efficiency of the PV panel and the inverter, which converts the direct current (DC) PV output to "grid-level" alternating current (AC). \subsubsection{Plane-of-array radiation} \label{app:irrad} \begin{figure}[b] \centering \begin{subfigure}[t]{.43\textwidth} \centering \includegraphics[width=\linewidth]{images/Figs/poa_2.pdf} \subcaption{} \label{figa:solar_geom} \end{subfigure} \begin{subfigure}[t]{.53\textwidth} \centering \includegraphics[width=\linewidth]{images/Figs/diffuse_components.png} \subcaption{} \label{figb:solar_geom} \end{subfigure} \caption[Geometrical models for calculating the angle of incidence of direct beam radiation and the diffuse radiation components Source: \citet{brownson_44_nodate}]{Geometrical models for calculating (a) the angle of incidence ($\theta$) of direct beam radiation on a tilted plane, and (b) the isotropic, circumsolar and horizon diffuse components (Source (b): \citet{brownson_44_nodate}).} \label{fig:solar_geom} \end{figure} To compute the direct, diffuse and reflected tilted radiation components $G_{Bt}$, $G_{Dt}$ and $G_{Rt}$, empirical and geometrical models are widely accepted in the literature. The direct component is obtained from a geometrical projection of the horizontal beam component $G_B$ onto the angle of incidence of the sun rays on the tilted panel ($\theta$) (see Fig.~\ref{figa:solar_geom}). Mathematically, this projection is defined as \cite{gulin_estimation_2013}: \begin{equation} \label{eq:direct} G_{Bt} = G_{B} * \max \left( 0, \frac{\cos(\theta)}{\cos(\theta_Z)} \right) \end{equation} where \begin{equation} \label{eq:dir_angle} \cos(\theta) = \sin(\beta) \sin(\theta_Z) \cos(\gamma_S - \gamma) + \cos(\beta) \cos(\theta_Z) \end{equation} The angles $\theta_Z$ and $\gamma_S$ describe the sun zenith and azimuth angles, respectively, while roof tilt and aspect are given by $\beta$ and $\gamma$. Diffuse tilted radiation is more complex to determine due to its diffracted nature. Several models exist with different assumptions and hence varying complexity. \citet{assouline_estimation_2017} review these different models and compare the temporal resolutions for which they are applicable. Of the methods listed in \citet{assouline_estimation_2017} , the Perez model ~\cite{perez_modeling_1990} is the most frequently used model in the reviewed literature \cite{buffat_scalable_2018,jakubiec_method_2013,mainzer_assessment_2017,wegertseder_combining_2016}, and will also be used throughout this thesis. 
The empirical formula for $G_{Dt}$ using the Perez model is given by~\cite{perez_modeling_1990}: \begin{equation} \label{eq:diffuse} G_{Dt} = G_D * \left[ (1 - F_1) \left( \frac{1 + \cos \beta}{2} \right) + F_1 \frac{ a }{ b } + F_2 \sin \beta \right] \end{equation} where $F_1$ and $F_2$ are empirically fitted functions for the circumsolar and horizon brightness and $a$, $b$ are geometric angles. The derivation of these factors is described in \cite{loutzenhiser_empirical_2007}. The three addends in Eq.~\eqref{eq:diffuse} represent an isotropic component, a circumsolar component originating from the sun disk (modelled as a point source), and a horizon component, respectively, as shown in Fig.~\ref{figb:solar_geom}. The reflected radiation ($G_{Rt}$) is again obtained from a widely used geometric projection based on the surface albedo $\rho$, which is defined as \cite{duffie_solar_2013}: \begin{equation} \label{eq:reflected} G_{Rt} = G_h * \rho \left( \frac{1-\cos \beta}{2} \right) \end{equation} To ease the notation, Equations~\eqref{eq:direct}-\eqref{eq:reflected} can be referred to as: \begin{equation} \label{eq:tilted_irrad_simplified} G_{Bt} = F_B G_B, \quad G_{Dt} = F_D G_D, \quad G_{Rt} = F_R G_h \end{equation} \subsubsection{PV module and inverter efficiency} \label{app:efficiency} Various empirical models exist to compute the electricity yield of PV systems. To compute the panel efficiency ($\eta_{PV}$), the most detailed models simulate the maximum power point of the PV panel using single-diode current voltage equations \cite{strzalka_large_2012,buffat_scalable_2018}. Most studies use a more generalised approach, which models $\eta_{PV}$ as a function of $G_t$, the PV cell temperature (derived from the ambient temperature) and the nominal performance rating of the PV panel \cite{jakubiec_method_2013,calcabrini_simplified_2019,singh_estimation_2015,ramirez_camargo_spatio-temporal_2015,mainzer_assessment_2017}. \citet{jakubiec_method_2013} compare PV efficiency models for widely used methods for two test buildings. A general formulation of the DC power output of a PV panel ($P_{dc}$) is formulated in the \textit{PVWatts} model developed by the National Renewable Energy Laboratory (NREL) \cite{dobos_pvwatts_2014}, which is used in similar formulations for example in \cite{jakubiec_method_2013,ramirez_camargo_spatio-temporal_2015,singh_estimation_2015,calcabrini_simplified_2019}. In this work, I follow these studies, modelling the DC power output of a PV panel as: \begin{equation} \label{eq:Pdc} P_{dc} = \frac{G_{t}}{1000} P_{dc0} (1 + \gamma_\mathit{pdc}(T_\mathit{cell}-T_\mathit{ref})) \end{equation} where $G_t$ is the tilted radiation, $P_{dc0}$ is the DC rating of the panel, $\gamma_\mathit{pdc}$ is its temperature coefficient, $T_\mathit{cell}$ is the cell temperature and $T_\mathit{ref}$ is the reference temperature. % I use average panel specifications of mid-range 60-cell mono-crystalline PV modules, the most frequently used technology in Switzerland \cite{buffat_scalable_2018}, from three market-leading manufacturers (JA Solar, Jinko Solar, Trinasolar). The values are reported in Table~\ref{tab:efficiency}. 
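For illustration only, the direct-beam projection of Eqs.~\eqref{eq:direct}--\eqref{eq:dir_angle} and the DC model of Eq.~\eqref{eq:Pdc} translate directly into code. The following minimal Python sketch uses the default parameter values of Table~\ref{tab:efficiency}; it is not the validated implementation used in this work, which relies on the \texttt{pvlib} package introduced at the end of this section.
\begin{verbatim}
import numpy as np

def beam_on_tilted_plane(g_b, sun_zenith, sun_azimuth, tilt, aspect):
    """Direct (beam) irradiance on a tilted plane; all angles in radians."""
    cos_theta = (np.sin(tilt) * np.sin(sun_zenith) * np.cos(sun_azimuth - aspect)
                 + np.cos(tilt) * np.cos(sun_zenith))
    # Projection of the horizontal beam component onto the panel plane
    return g_b * np.maximum(0.0, cos_theta / np.cos(sun_zenith))

def pvwatts_dc(g_t, t_cell, pdc0=285.0, gamma_pdc=-0.0039, t_ref=25.0):
    """DC power output of one panel following the PVWatts formulation."""
    return g_t / 1000.0 * pdc0 * (1.0 + gamma_pdc * (t_cell - t_ref))
\end{verbatim}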
%
From the DC power output and the area of a PV panel ($A_\mathit{panel}$), the module efficiency $\eta_{PV}$ is computed as:
\begin{equation}
\label{eq:eff}
\eta_{PV} = \frac{P_{dc}}{G_{t} * A_\mathit{panel} }
\end{equation}

\begin{table}[tb]
\centering
\footnotesize
\caption{Parameters used in the PV module and inverter efficiency models \cite{dobos_pvwatts_2014, faiman_assessing_2008}.}
\label{tab:efficiency}
\begin{tabular}{lll}
\hline
\textbf{Parameter} & \textbf{Value} & \textbf{Description} \\ \hline
$P_{dc0}$ & 285 Wp & Nameplate DC rating \\
$\gamma_{pdc}$ & $-0.39$ \%/°C & Temperature coefficient \\
$T_\mathit{ref}$ & 25 °C & Cell reference temperature \\
$\alpha$ & 0.9 & Absorption coefficient \\
$\eta_m$ & 0.17 & Nominal module efficiency \\
$U$ & 15 W/m$^2$K & Heat transfer component \\
$A_\mathit{panel}$ & 1.6 m$^2$ & PV panel area \\
$\eta_\mathit{nom}$ & 0.96 & Nominal inverter efficiency \\
$\eta_\mathit{ref}$ & 0.9637 & Reference inverter efficiency \\
\hline
\end{tabular}
\end{table}

With the exception of \cite{calcabrini_simplified_2019}, $T_\mathit{cell}$ is computed in the literature as a linear combination of the ambient temperature $T_\mathit{amb}$ and $G_t$, whereby $G_t$ is multiplied by a constant coefficient based on nominal operating conditions and corrected for the roof absorptivity and radiative heat losses \cite{jakubiec_method_2013}.
%
In this work, I use the \textit{PVSyst} model~\cite{faiman_assessing_2008} to derive $T_\mathit{cell}$ from $G_t$ and $T_\mathit{amb}$:
\begin{equation}
T_\mathit{cell} = T_\mathit{amb} + G_t \frac{\alpha (1 - \eta_m)}{U}
\end{equation}
where $\alpha$ denotes the absorption coefficient, $\eta_m$ denotes the module efficiency and $U$ is the heat transfer component, for which default values for rooftop-mounted PV systems are used as suggested by Sandia National Laboratories \cite{holmgren_pvlib_2018} (Table~\ref{tab:efficiency}).

In contrast to Eq.~\eqref{eq:Pdc}, which is based on physical principles, the inverter efficiency ($\eta_\mathit{inv}$) is modelled using a fully empirical formula with three free parameters. Out of the various sets of coefficients in the literature \cite{mainzer_assessment_2017,lukac_buildings_2014}, I use the empirical loss formula from \textit{PVWatts}, also used in \cite{buffat_scalable_2018}, for consistency with Eq.~\eqref{eq:Pdc}. It is defined as \cite{dobos_pvwatts_2014}:
\begin{equation}
\label{eq:inv}
\eta_\mathit{inv} = \frac{\eta_\mathit{nom}}{\eta_\mathit{ref}} \left( -0.0162 * \zeta - \frac{0.0059}{\zeta} + 0.9858 \right)
\end{equation}
where $\zeta = P_{dc}/P_{dc0}$, $\eta_\mathit{nom}$ is the nominal inverter efficiency and $\eta_\mathit{ref}$ is the reference efficiency (see Table \ref{tab:efficiency} for the values used in this work).
%
Multiplying $\eta_\mathit{inv}$ with other system losses ($\eta_\mathit{losses}$, including soiling, degradation, mismatch, wiring and connection, etc.) yields the performance factor (\textit{PF}) \cite{klauser_solarpotentialanalyse_2016}. These additional losses are estimated throughout this work as 14\% \cite{dobos_pvwatts_2014}. The implementation of the empirical models is based on the \texttt{pvlib} python package developed by Sandia National Laboratories \cite{holmgren_pvlib_2018}.

\subsection{Geospatial techniques}
\label{GIS_methods}

In the context of large-scale studies of solar PV potential, geospatial methods are used to accurately quantify shading effects, the sky view factor and the available area for PV installation.
The primary inputs for these methods are 2D building or roof geometries in vector format, as well as 3D Light Detection and Ranging (LiDAR) point clouds or digital elevation models (DEM) in raster format. While LiDAR data has a higher spatial resolution and is hence used for building or neighbourhood-scale studies, DEMs are mostly used for studies at regional scale for reasons of computational efficiency. In the following, existing methods to compute shading, SVF and available area are summarized, with a focus on methods that are scalable to entire cities or regions. Alternative approaches, including constant-value methods and extrapolation techniques, are reviewed in \cite{assouline_estimation_2017}. \subsubsection{Shading effects and sky view factor} \begin{figure}[b] \centering \begin{subfigure}[t]{.49\textwidth} \centering % include second image \includegraphics[trim=100 0 130 0, clip, width=.9\linewidth]{Figs/horizon.pdf} \subcaption{} \label{figa:horizon_method} \end{subfigure} \begin{subfigure}[t]{.49\textwidth} \centering % include second image \includegraphics[width=\linewidth]{images/Figs/skyview_point_w_star.pdf} \subcaption{} \label{figb:horizon_method} \end{subfigure} \caption[Horizon modelling for an example roof]{Horizon modelling for an example roof. (a) Conceptual graph with relevant horizon bins for quantification of shading effects, (b) polar plot of horizon height and visible sky proportion ($\mathit{SVF} = 0.3$), with an example sun position for Switzerland (September, 15h UTC).} \label{fig:horizon_method} \end{figure} The computation of shading effects and the SVF follow a common 3D modelling approach, known as shadow casting \cite{levinson_solar_2009,desthieux_solar_2018}, ray tracing \cite{jakubiec_method_2013} or viewshed computation \cite{nguyen_incorporating_2012}. As shown conceptually in Fig.~\ref{figa:horizon_method}, a 3D model of the environment is used to obtain the horizon height for each aspect angle. % This horizon height ($\theta_\mathit{min}$ in Fig.~\ref{figa:horizon_method}) describes the visible proportion of the sky for a specific aspect angle ($\gamma$ in Fig.~\ref{figa:horizon_method}), and may also be interpreted as the minimum sun elevation angle required to illuminate a given point. % The horizon height for all aspect angles can be visualised as a polar plot, from which the SVF is derived as the visible sky fraction (Fig.~\ref{figb:horizon_method}). The polar plot further indicates whether the given point is shaded during a particular hour of the year based on the sun position (see star in Fig.~\ref{figb:horizon_method}). \begin{figure}[tb!] \centering \begin{subfigure}{.49\textwidth} \centering % include second image \includegraphics[height=.9\linewidth]{images/Figs/demo_vis_09_15h_w_cross.pdf} \subcaption{} \label{figa:raster_svf_ssh} \end{subfigure} \begin{subfigure}{.49\textwidth} \centering % include second image \includegraphics[height=.9\linewidth]{images/Figs/svf_w_legend.pdf} \subcaption{} \label{figb:raster_svf_ssh} \end{subfigure} \caption[Raster-based cast shadows and sky view factor for an example area in Geneva, Switzerland]{Raster-based (a) cast shadows for one example hour (15h UTC in September, see star in Fig. \ref{figb:horizon_method}) and (b) sky view factor, for an area of $200\times200$ m$^2$ in Geneva, Switzerland. The red/black cross on the figures shows the point for which the horizon height is shown in Fig. 
\ref{figb:horizon_method}.}
\label{fig:raster_svf_ssh}
\end{figure}

While fisheye images \cite{calcabrini_simplified_2019} or LiDAR point clouds \cite{bill_3d_2016,jakubiec_method_2013} may be used for horizon computations, DEMs are by far the most widely used input data, as they enable the computation of horizon heights for an entire region \cite{suri_new_2004}. Using a DEM to compute the horizon heights yields a set of raster maps, one for each azimuth angle. From these, maps of cast shadows (Fig.~\ref{figa:raster_svf_ssh}) can be computed for any sun position, by selecting the horizon map corresponding to the sun azimuth angle and applying a binary filter to the raster values, setting all values below the sun altitude to 1 (sun exposure) and all values above the sun altitude to 0 (cast shadow).
%
The SVF is obtained from a combination of the same set of horizon maps, yielding an SVF for each pixel (Fig.~\ref{figb:raster_svf_ssh}). The SVF is independent of the sun position and hence constant with time.

While some studies compute PV potential at the level of individual pixels \cite{buffat_scalable_2018,ramirez_camargo_spatio-temporal_2015}, the results are often aggregated per building or roof surface \cite{assouline_quantifying_2017,assouline_large-scale_2018,klauser_solarpotentialanalyse_2016}. To aggregate shading effects or the sky view factor per roof, existing methods use 2D building or roof geometries, which are geospatially intersected with the raster maps to average all pixels within a given roof/building. This geospatial overlay further reduces the data storage requirements for the maps of shading and SVF, as all non-relevant areas for rooftop RPV potential are excluded.

In this work, I use the shadow casting and SVF computation method as detailed above. I use the open-source GRASS-GIS computational engine, which supports the computation of horizon heights \cite{suri_new_2004} and the sky view factor \cite{zaksek_sky-view_2011} and which is used in several of the reviewed studies \cite{ramirez_camargo_spatio-temporal_2015,nguyen_incorporating_2012,buffat_scalable_2018}. Further details regarding the implementation of the computation of shading effects and the SVF for scalability to the Swiss national scale are provided in Appendix~\ref{app:shade}.

\subsubsection{Available area for solar panel installation}

Based on an early study of RPV potential in Europe \cite{iea_potential_2002}, three factors of the rooftop available area have been identified, namely (i) the total roof area, (ii) the roof utilisation and (iii)~the roof exposure or suitability. \\
\textbf{Total roof area.} For the estimation of RPV yield, three characteristics of the roof areas are relevant: the roof geometries, the tilt angle and the aspect angle. Geospatial datasets containing these characteristics are already available in some countries such as Switzerland (see Chapter~\ref{data_buildings}). If no rooftop data is available, rooftop tilt and aspect angles can be derived from a DEM \cite{ramirez_camargo_spatio-temporal_2015} or LiDAR point cloud \cite{buffat_feature-aware_2016,nguyen_incorporating_2012}. To classify different roof surfaces, these may be combined with building geometries from Open Street Map (OSM) or local survey data \cite{ramirez_camargo_spatio-temporal_2015}. \citet{mohajeri_city-scale_2018} propose an ML approach to classify roof shapes based on DEM-derived features aggregated within building geometries.
In the absence of building geometries, aerial images may be segmented using GIS-based feature extraction \cite{wiginton_quantifying_2010} or Convolutional Neural Networks (CNN) \cite{zhao_object-based_2017} to detect building footprints. Even without a DEM, LiDAR or other 3D building dataset, the roof aspect angle can be approximated from the building geometry as the angle normal to the roof ridge line or building wall \cite{mainzer_assessment_2017}. Aerial images themselves however do not provide any information on the roof tilt angle. For the purpose of large-scale RPV studies, \citet{mainzer_assessment_2017} thus suggest using a statistical approach, which randomly assigns tilt angles using a normal distribution fitted for a set of buildings with available roof tilt data for Germany.

\textbf{Roof utilisation.} Roof utilisation refers here to any factors reducing the available area for solar panel installation due to alternative uses. Using the definition by the \citet{iea_potential_2002} (``architectural suitability''), this includes (i) any obstructing objects on roofs, such as fixed superstructures (e.g. dormers, chimneys, elevators), roof-mounted structures (e.g. HVAC equipment) and windows, (ii) surfaces used for other purposes, such as roof terraces or gardens, (iii) protected roofs, and (iv) shaded areas. As quantifying these factors requires the availability of high-resolution 3D building data or aerial imagery, many studies employ constant value methods \cite{assouline_estimation_2017}, which assume a constant ratio of available area. These constant values may vary for different building types and/or roof sizes \cite{wegertseder_combining_2016,portmann_sonnendach.ch:_2016,wiginton_quantifying_2010}.

Obstructing objects on roofs are most easily identified and removed from the total roof area using manual labelling of aerial images, as performed for example in \cite{ordonez_analysis_2010,strzalka_large_2012}. This approach is only feasible for a small sample size \cite{ordonez_analysis_2010}. At larger scale, high-resolution 3D building models, such as CityGML datasets with an LOD~4 standard, have been used to exclude superstructures from rooftops \cite{assouline_quantifying_2017}. These 3D building models however do not include rooftop-mounted structures or windows. Such objects can only be automatically detected using image processing approaches, as proposed by \citet{mainzer_assessment_2017} who use an edge-detection algorithm to identify obstructing objects. \citet{castello_deep_2019} have used a CNN algorithm to detect existing PV panels on building rooftops, which may be transferable to the detection of available area for RPV installation.

Going beyond the detection of obstructing objects, several studies assess the area that may be covered by PV panels, by virtually installing PV panels (rectangular polygons) on the geometries of the available area \cite{ordonez_analysis_2010,mainzer_assessment_2017,assouline_large-scale_2018}. This virtual panel placement accounts for the impact of rooftop geometries and the location of existing superstructures on the potential number of PV panels and allows the simulation of different arrangements of PV panels, for example on flat roofs \cite{ordonez_analysis_2010}. In contrast to obstructing objects, alternative uses of the roof surfaces and potential protection of buildings cannot be derived from 3D models.
Aerial imagery may provide some information to this aim, as terraces for example may contain some non-fixed objects such as tables and chairs which could be classified as unsuitable for PV installation by an automatic image processing method. However, no current methodology follows this approach. Protected roofs may be identified by local inventories of protected buildings. Alternatively, \citet{florio_assessing_2018} propose a method to quantify rooftop visibility in urban settings. Shaded areas, which may be obtained from the shadow casting approach described above, are excluded as part of the roof utilisation for example in \cite{singh_estimation_2015,hong_development_2017}. \textbf{Roof exposure/suitability.} In addition to roof utilisation, large-scale studies of RPV potential exclude areas which are expected to provide a low electricity yield. This roof suitability may be based on a low solar irradiation (e.g. $< 1000$ kWh/m$^2$ \cite{buffat_scalable_2018,portmann_sonnendach.ch:_2016,desthieux_solar_2018}), on the roof aspect (e.g. $\leq 90$° from south \cite{assouline_quantifying_2017,assouline_large-scale_2018,nguyen_incorporating_2012}) or on the roof tilt (e.g. $\leq 60$° \cite{jakubiec_method_2013}). Additionally, large-scale RPV potential studies frequently exclude small areas, as these are unlikely to be economically feasible. Minimum roof area thresholds in the literature range from 8 m$^2$ \cite{assouline_large-scale_2018} to 33 m$^2$\cite{hong_development_2017}. \section{Shallow geothermal energy} \label{geo_method} As discussed in Section ~\ref{intro_geo}, ground-source heat pumps (GSHPs) using vertical closed-loop borehole heat exchangers (BHEs) are by far the most widely used technology to extract shallow geothermal energy in Switzerland \cite{link_statistik_2019}. % Focusing on closed-loop BHEs, this section reviews state-of-the-art analytical models to assess the technical geothermal potential of shallow GSHPs. The technical potential is hereby defined as the heat energy that may be annually extracted from a field of BHEs by a GSHP system ($Q_{GSHP}$), which is computed as (cf. \cite{pahud_geothermal_2002}): \begin{equation} \label{eq:Q_field_method} Q_\mathit{GSHP}=q_\mathit{max} \times t_{op} \times H \times N_\mathit{BHE} \end{equation} where $q_\mathit{max}$ is the heat extraction rate from the boreholes (in W/m), $t_{op}$ is the operating time of the system (in h), $H$ is the borehole depth (in m) and $ N_\mathit{BHE}$ is the number of installed BHEs. To estimate the technical potential, the following factors must be taken into account: (i) The depletion of the ground when boreholes are placed too close to each other or too much heat is extracted; (ii) the sizing, geometry and material of the BHE tubes; (iii) the efficiency of the heat pumps that transfer the extracted heat to domestic heating and hot water applications \cite{bayer_geothermal_2019}. \begin{figure}[bt] \centering \includegraphics[width=0.7\linewidth]{Figs/BHE_layout.png} \caption[Single BHE installation and its effects on the temperature in the ground. Source: \citet{wagner_erdsondenpotenzial_2014}]{Single BHE installation and its effects on the temperature in the ground. The arrows on the top indicate the natural heat flux from the environment, those on the bottom indicate the geothermal heat flux. The dashed lines are isothermal lines, while the continuous lines represent the heat flow in the stationary state. 
Source: \citet{wagner_erdsondenpotenzial_2014}} \label{fig:BHE} \end{figure} The depletion of the ground is driven by changes in the subsurface temperature, which occur when heat is extracted (or injected) at a faster rate than it is regenerated from the natural heat flux. This heat flux is caused by the radioactive decay of chemical elements in the earth's core (geothermal heat flux) and by the heat in the ambient air, as shown in Fig.~\ref{fig:BHE} \cite{wagner_erdsondenpotenzial_2014}. When heat is extracted from the ground, so-called \textit{thermal plumes} form around the BHE (dashed lines in Fig.~\ref{fig:BHE}) \cite{alcaraz_t-i-ger_2017}. Their magnitude and extent depends on the borehole design, particularly the depth and heat extraction rate, as well as on the thermal properties of the ground. % When boreholes are spaced too close to each other, thermal plumes may overlap, which causes \textit{thermal interference} between these boreholes and leads to an increased cooling of the subsurface. In this work, quantifying the thermal interference is of high importance, as thermal interference significantly reduces the potential borehole installation density \cite{bayer_geothermal_2019,miglani_methodology_2018}. Thermal interference between boreholes is assessed in the literature based on an analytical model, developed by \citet{eskilson_thermal_1987}, which simulates the thermodynamic behaviour of BHEs based on the thermal ground properties as well as the sizing, geometry and material of the BHEs. To obtain a technical potential for closed-loop GSHPs, related studies (e.g. \cite{miglani_methodology_2018,rivera_increased_2017,schiel_gis-based_2016,viesi_gis-supported_2018,casasso_g.pot:_2016,perego_techno-economic_2019}) combine this analytical model with norms for BHE design practice, using primarily the geothermal norms of the Association of German Engineers (VDI) \cite{vdi_vdi_2019} and the Swiss Society of Engineers and Architects (SIA) \cite{sia_sondes_2010}. The parameters required for the analytical model and their values given by the SIA norm (relevant for Switzerland) will be introduced in Section~\ref{geo_params}. Both the VDI and SIA norms require the BHEs to be designed and operated such that the mean temperature of the borehole fluid ($T_{mf}$) never drops below its freezing temperature ($T_\mathit{mf,min}=-1.5$ °C \cite{sia_sondes_2010}), such that \cite{sia_sondes_2010,vdi_vdi_2019}: \begin{equation} \label{eq:T_mf_min} T_{mf} \geq T_\mathit{mf,min} = -1.5 \text{°C} \end{equation} The variables in Eq.~\eqref{eq:Q_field_method} ($q_\mathit{max}$, $t_{op}$, $H$, $N_\mathit{BHE}$) must hence be chosen such as to fulfil this technical requirement. % The relationship between these variables and the $T_{mf}$ is expressed by the borehole \textit{temperature profile}, which describes the temperature changes inside and around a BHE. A general model of this temperature profile, proposed by \citet{claesson_conductive_1988}, is introduced in Section~\ref{model_intro}, while Section~\ref{geo_models} details the analytical formulas used to estimate the thermodynamic processes in the ground. \\ While heat extraction is the dominant mode of operation of GSHPs in Europe \cite{lund_direct_2020}, GSHPs can also be used for heat injection, for example for space cooling applications \cite{kavanaugh_geothermal_2014}, whereby the same physical principles (Sections ~\ref{model_intro} and \ref{geo_models}) apply. 
As heat injection is not addressed in the SIA or VDI norms, studies on the potential of GSHPs for heat injection use the standards of the American Society of Heating, Refrigerating and Air-Conditioning Engineers (ASHRAE) \cite{kavanaugh_geothermal_2014} for borehole sizing \cite{aditya_environmental_2020,miglani_methodology_2018,michopoulos_potential_2011} . For cooling-dominated systems, the temperature requirement from Eq.~\eqref{eq:T_mf_min} is inverted, limiting the maximum fluid temperature $T_\mathit{mf,max}$: \begin{equation} \label{eq:T_mf_max} T_{mf} \leq T_\mathit{mf,max} \end{equation} \begin{figure}[tb] \centering \includegraphics[width=0.7\linewidth]{images/Figs/GSHP.png} \caption[Working principle of a GSHP in heating cycle. Source: \citet{gns_science_nz_geothermal_2016}]{Working principle of a GSHP in heating cycle. Cold water is moved from the house to the heat pump, where it is heated from the compressed heat extracted from the ground. The ground loop liquid is cooled and transported back to the ground, where it is again heated. Source: GNS Science \cite{gns_science_nz_geothermal_2016}.} \label{fig:HP} \end{figure} In addition to the BHEs, a GSHP system contains a heat pump (HP), which is used to transfer the heat extracted from the ground to the building heating or cooling systems (see Fig. \ref{fig:HP}). HPs have a high system efficiency, expressed through the coefficient of performance (COP), making GSHPs a very energy efficient technology for building thermal energy supply. Based on the COP, the heat supplied to the building heating ($Q_\mathit{heat}$) and cooling ($Q_\mathit{cool}$) systems is computed as \cite{kavanaugh_geothermal_2014}: \begin{equation} \label{eq:COP_heat} Q_\mathit{heat}=Q_\mathit{extr}\ \frac{COP_\mathit{heat}}{\left(COP_\mathit{heat}-1\right)} \end{equation} \begin{equation} \label{eq:COP_cool} Q_\mathit{cool}=Q_\mathit{inj}\ \frac{COP_\mathit{cool}}{\left(COP_\mathit{cool}+1\right)} \end{equation} where $Q_\mathit{extr}$ and $Q_\mathit{inj}$ are the extracted and injected heat from/to the BHEs and $COP_\mathit{heat}$/$COP_\mathit{cool}$ are the COPs for heating and cooling, respectively. In neighbourhood or regional-scale studies of technical GSHP potential, constant values are often used for the COP \cite{miglani_methodology_2018,schiel_gis-based_2016,perego_techno-economic_2019}, while studies at building scale typically model the COP as a function of temperature \cite{fraga_heat_2018,liu_feasibility_2017,stene_residential_2005}. \subsection{Modelling parameters} \label{geo_params} The parameters involved in the modelling of the thermodynamic behaviour of borehole heat exchangers can be divided into three groups: \begin{enumerate} \item \textbf{Physical parameters} are given by the geological and hydrological conditions of the ground, and are the subject of various large-scale studies \cite{signorelli_regional_2004,majorowicz_estimation_2009,tian_improved_2020}. Seasonal temperature variations in the ground are neglected, as boreholes typically have a depth $> 50$ m, which is beyond the penetration depth of seasonal variations in the ground \cite{stauffer_thermal_2013}. \item \textbf{Technical parameters} are derived from the materials and the technology of the BHEs. In this work, constant values based on the related literature are used. The impact of their variation on the BHE potential is not addressed. 
\item \textbf{Design parameters} are related to the sizing of the BHEs and the borehole fields, in order to estimate a technical potential of GSHPs. Optimizing these parameters is the main objective of the technical potential estimation. \end{enumerate} I refer to $z$ (in m) for the depth in the ground, $r$ (in m) for the radial distance to the center of a BHE, $t$ (in hours or years) for the time and $T$ (in °C or K) for the temperature. \begin{table}[b] \footnotesize \caption[Physical parameters for estimating shallow GSHP potential]{Physical parameters. Norm values and ranges for Switzerland are obtained from \citep{sia_sondes_2010}.} \label{tab:phys_params} \centering \begin{tabular}{lllll} \hline \textbf{Symbol} & \textbf{Unit} & \textbf{Description} & \textbf{Formula} & \textbf{Norm value / range (CH)} \\ \hline $\lambda$ & W/mK & Thermal conductivity & & $1-4$ (Plateau: $2-3$) \\ $\alpha$ & m$^2$/s & Thermal diffusivity & $\alpha = \frac{\lambda}{\rho C}$ & $0.9-1.4 \times 10^{-6}$ \\ $\rho C$ & MJ/m$^3$K & Volumetric heat capacity & & $1.2-3.5$ (Plateau: $2-2.5$) \\ \hline $T_g(z)$ & °C & Undisturbed ground temperature & & \textit{Norm}: 10 \\ $\frac{\delta T}{\delta z}$ & K/m & Temperature gradient & $T_g(z) = T_0 + z * \frac{\delta T}{\delta z} $ & Plateau: 0.03, Alps: 0.025 \\ $T_0$ & °C & Surface temperature & & \textit{Norm}: 8.5 \\ \hline $\dot{q}_{g}$ & mW/m$^2$ & Geothermal heat flow & $\dot{q}_{g} = \lambda * \frac{\delta T}{\delta z}$ & $40-170$ \\ \hline \end{tabular} \end{table} \textbf{Physical parameters}. The physical parameters are listed in Table~\ref{tab:phys_params}. The \textit{thermal conductivity} $\lambda$ and \textit{thermal diffusivity} $\alpha$ are used in the analytical models. However, the geological properties of different types of rocks are typically provided as $\lambda$ and the \textit{volumetric heat capacity} $\rho C$. From these parameters, the thermal diffusivity is computed as \cite{pahud_geothermal_2002}: \begin{equation} \label{eq:alpha} \alpha = \frac{\lambda}{\rho C} \end{equation} In the literature, the values for $\rho C$ and $\lambda$ are either derived from measurements and interpolated using kriging \cite{tian_improved_2020,munoz_estimating_2015} or Machine Learning~\cite{assouline_geothermal_2019}, obtained from 3D underground models \cite{garcia-gil_gis-supported_2015,groupe_de_travail_pgg_evaluation_2011-1} or, most frequently, mapped from tabulated values based on the present rock types~\cite{perego_techno-economic_2019,galgaro_empirical_2015,casasso_g.pot:_2016,gemelli_gis-based_2011}. An overview of typical thermal conductivity and heat capacity values for rock types in Switzerland are given in the SIA norm \cite{sia_sondes_2010} and by \citet{pahud_geothermal_2002}. If hydro-geological data is available, the thermal conductivity can be adjusted to account for conductive heat transfer due to groundwater movement \cite{viesi_gis-supported_2018,assouline_geothermal_2019}, using the ground saturation level and the darcy velocity as input \cite{viesi_gis-supported_2018}. Advective heat transfer is neglected in the model proposed by \citet{eskilson_thermal_1987}. Analytical models accounting for advective heat transfer have been used for example in \cite{garcia-gil_gis-supported_2015,alcaraz_advection_2016,alcaraz_t-i-ger_2017,attard_novel_2020}. 
As the hydro-geological input data required for these advective-conductive models is not available at the regional scale for Switzerland, this thesis focuses on the conductive heat transfer model of \citet{claesson_conductive_1988}. A similar approximation is applied in most regional-scale studies, including \cite{perego_techno-economic_2019,galgaro_empirical_2015,casasso_g.pot:_2016,rivera_increased_2017,schiel_gis-based_2016}. Another important physical parameter is the \textit{undisturbed ground temperature} $T_g(z)$. In particular, the temperature at half of the borehole depth ($z = H/2$) is of interest. If no data is available for estimating $T_g$ directly, it can be extrapolated from the \textit{surface temperature} $T_0$ and the \textit{temperature gradient} $\delta T/\delta z$: \begin{equation} \label{eq:Tg} T_g(z) = T_0 + z * \frac{\delta T}{\delta z} \end{equation} \begin{figure} \centering \includegraphics[width=0.6\linewidth]{Figs/ground_temperature.png} \caption[Example for the variation in ground temperature for two measurement sites in Zürich. Source: \citet{huber_bodentemperaturen_2014}]{Example for the variation in ground temperature for two measurements in Zürich, showing the impact of altitude (black line) and surface heating through the presence of forests (red line). Source: \citet{huber_bodentemperaturen_2014}.} \label{fig:T_ground} \end{figure} The temperature $T_0$ can be interpolated based on measurements of the near-surface ground temperature \cite{assouline_geothermal_2019} or derived from measurements of the ambient temperature ($T_\mathit{amb}$). Different methods to determine $T_0$ are compared in \citep{signorelli_geoscientific_2004}. In the SIA norm, surface temperatures are approximated from the mean annual $T_\mathit{amb}$ by adding an altitude-dependent term \cite{sia_sondes_2010}. As the actual $T_0$ may deviate from the approximated value (see Fig.~\ref{fig:T_ground}), the SIA norm instructs the addition of a tolerance ($\Delta T$) of $- 1$ K for heating and $+1$ K for cooling. While not being directly used in the analytical model of GSHPs, the (undisturbed) \textit{geothermal heat flow} $\dot{q}_{g}$ plays an important role in the quantification of geothermal resources, as it characterises the natural heat flow in the ground. It is given by \cite{huber_bodentemperaturen_2014}: \begin{equation} \label{eq:heat_flux} \dot{q}_{g} = \lambda * \frac{\delta T}{\delta z} \end{equation} The geothermal heat flow is nearly constant in the outer crust of the earth, but may vary in mountain terrain \citep{huber_bodentemperaturen_2014}. Eq.~\eqref{eq:heat_flux} may be used to compute $\lambda$ or $ \frac{\delta T}{\delta z}$ if other data is unavailable. \begin{figure}[tb] \centering \includegraphics[width=0.6\linewidth]{Figs/BHE_tech.png} \caption[Cross-section of a duplex BHE layout. Source: \citet{pahud_geothermal_2002}]{Cross-section of a duplex BHE layout. Source: \citet{pahud_geothermal_2002}.} \label{fig:BHE_cross-sec} \end{figure} \textbf{Technical parameters}. The technical parameters required to determine the technical geothermal potential are set to constant values in the literature (e.g. \cite{miglani_methodology_2018,rivera_increased_2017,zhang_critical_2017}). The $T_\mathit{mf,min}$ is set in most reviewed studies to $-1.5$ °C \cite{sia_sondes_2010, vdi_vdi_2019}. For $T_\mathit{mf,max}$, no unique value exists across the literature. 
Instead, \citet{kavanaugh_geothermal_2014} suggest to choose a $T_\mathit{mf,max}$ of $11 - 18$ °C above $T_g$, while \citet{pahud_geothermal_2002} mentions a $T_\mathit{mf,max}$ of 50 °C. % \cite{spitler_vertical_2016}. Quantifying the temperature drop inside the borehole further requires knowledge of the \textit{borehole radius} ($r_b$) as well as the \textit{effective thermal resistance} of the borehole ($R_b$). A typical layout of a duplex system, the most common form of BHE installations \cite{sia_sondes_2010,pahud_geothermal_2002}, is shown in Fig.~\ref{fig:BHE_cross-sec}. The $R_b$ is determined by the materials and the geometry of the BHE, which are analysed in detail in \citep{huber_erdwarmesonden_2005}. Table~\ref{tab:tech_design_params} summarises the range of parameters found in the literature and the SIA norm values (if applicable). \begin{table}[tb] \footnotesize \caption[Technical and design parameters for shallow GSHPs]{Technical and design parameters. Norm values are given in \citep{sia_sondes_2010}, while other parameters are obtained from related studies \citep{pahud_geothermal_2002, wagner_erdwarmesonden._2019, claesson_conductive_1988}.} \centering % \resizebox{\textwidth}{!}{% \begin{tabular}{lllll} \hline \textbf{Symbol} & \textbf{Unit} & \textbf{Description} & \textbf{Formula} & \textbf{Norm value / range (CH)} \\ \hline $T_\mathit{mf, min}$ & °C & Minimum mean fluid temperature & $T_{mf} = \frac{T_{in} + T_{out}}{2}$ & \textit{Norm}: $- 1.5$ \\ $r_b$ & m & Borehole radius & & $0.055-0.07$ \\ $R_b$ & mK/W & Effective borehole resistance & & $0.08-0.1$ \\ \hline $H$ & m & Borehole depth & & \textit{Norm}: $100$ (mostly $50-200$) \\ $D$ & m & Distance between $z=0$ and BHE outlet & & $2-5$ \\ $B$ & m & Spacing between BHEs & & $>5$ (no effect for B $>$ H) \\ \hline $Q_{HP}$ & W & Heat extraction power during operation & & $4500-8400$ \\ $q_\mathit{max}$ & W/m & Maximum heat extraction rate & $q_\mathit{max} = \frac{Q_{HP}}{H}$ & \textit{Norm} ($q_\mathit{nom}$): $20-55$ \\ $t_{op}$ & h & Annual operation time & & \textit{Norm}: $1850$ \\ $t_\mathit{seas}$ & years & Periodicity of seasonal heat extraction & & 1 \\ $t_\mathit{peak}$ & h & Duration of maximum extraction pulse & & $1 - 10$ days \\ \hline $t_\mathit{dim}$ & years & Planning horizon for dimensioning the BHE & & $50$ \\ \hline \end{tabular} % } \label{tab:tech_design_params} \end{table} \textbf{Design parameters}. Two groups of design parameters are relevant to quantify the effects of the geometry of a BHE field on the temperature in the ground, which are typically assessed after a planning horizon $t_\mathit{dim}$. The first parameter group describes the borehole geometry. This includes the \textit{borehole depth} ($H$), the horizontal \textit{BHE spacing} ($B$) and the distance between the top part of the BHE (where heat is extracted) and the surface ($D$). A second group of design parameters is related to the heat extraction from the borehole. It includes the \textit{heat extraction power} ($Q_{HP}$), the \textit{maximum heat extraction rate} ($q_\mathit{max}$), the \textit{operating time} ($t_{op}$) and the \textit{duration of maximum operation} ($t_\mathit{peak}$). The heat extraction power is related to the power rating of the heat pump (HP), whose typical values are shown in Table~\ref{tab:tech_design_params}. % In a large-scale study, however, $Q_{HP}$ is not a technical parameter, as the number of heat pumps in the system is undefined. 
The heat extraction rate is computed from $Q_{HP}$ and $H$ as shown in Table~\ref{tab:tech_design_params}. Nominal curves for $q_\mathit{max}$ as a function of $\lambda$ and $\rho C$, referred to as nominal heat extraction rate $q_\mathit{nom}$, are provided in \cite{sia_sondes_2010}. These curves can be approximated as (cf. \cite{sia_sondes_2010}): \begin{equation} \label{eq:q_nom} q_\mathit{nom} \approx \frac{T_g - T_\mathit{mf, min}}{11.5} \ \left( 10.6 \lambda + 11.2 + 2 \left( \frac{\lambda}{\alpha} - 2 \right) \right) \end{equation} The operating time indicates the number of full-load hours in which the heat pump is operating (with power $Q_{HP}$). The norm values, used throughout this work, depend on altitude and location \cite{sia_sondes_2010}. The $t_\mathit{peak}$ measures the maximum time of non-stop operation of the HP, which impacts the maximum temperature drop in the BHE and is usually taken as 1, 5 or 10 days~\cite{pahud_geothermal_2002}. \subsection{Temperature profile of a BHE installation} \label{model_intro} To quantify the temperature field of a BHE, we distinguish between the processes inside and outside the BHE. The temperature drop inside the BHE, i.e. between the heat carrier fluid at mean temperature $T_{mf}$ and the borehole wall at temperature $T_b$, is a function of the heat extraction rate and the effective thermal resistance of the borehole, such that \citep{claesson_conductive_1988}: \begin{equation} \label{eq:T_b} T_b(t) - T_{mf}(t) = q_\mathit{max}*R_b \end{equation} The temperature at the borehole wall $T_b$ differs from the undisturbed ground temperature $T_g$ by a temperature drop $\Delta T_b$. It varies with depth ($z$), so the average value along the borehole may be obtained by numerical integration ($z = \overline{z}$) or, in a simplified way, by taking $z = H/2$. The borehole wall temperature $T_b$, with $r = r_b$, is hence computed as: \begin{equation} T_b(z, t) = T_g(z) - \Delta T_b(z, t) \end{equation} \begin{figure} \centering \includegraphics[width=.6\linewidth]{Figs/q_seasonal.png} \caption[Simplified heat extraction rate evolution for a typical year. Source: \citet{pahud_geothermal_2002}]{Simplified heat extraction rate for a typical year (constant + periodic + pulse). Source: \citet{pahud_geothermal_2002}.} \label{fig:q_seasonal} \end{figure} The analytical model proposed by \citet{eskilson_thermal_1987} is based on the principles of temporal and spatial superposition, which assumes that $\Delta T_b$ is the sum of the temperature drops due to heat extraction pulses of different duration and from any neighbouring borehole, whereby boreholes at distances greater than $H/2$ can be neglected \cite{pahud_geothermal_2002}. The principle of temporal superposition implies that $\Delta T_b(z, t)$ can be modelled as a long-term (\textit{LT}), a seasonal (\textit{seas}) and a short-term (\textit{peak}) component \citep{claesson_conductive_1988}, as shown in Fig.~\ref{fig:q_seasonal}. Long-term effects are represented by a constant heat extraction rate $\overline{q}$, which is typically assessed for a planning horizon ($t_\mathit{dim}$) of 20 or 50 years \cite{pahud_geothermal_2002,miglani_methodology_2018}. Seasonal effects are represented as a sinusoidal heat extraction with peak rate $q_\mathit{seas}$ and period $t_\mathit{seas}$ of 1 year. The peak extraction, which occurs at maximum seasonal extraction, has a duration $t_\mathit{peak}$ (typically 1-10 days). The energy extracted from this pulse is neglected \cite{claesson_conductive_1988}. 
To obtain $\Delta T_b$, each heat extraction rate is multiplied by its respective thermal resistance ($R_{LT},R_\mathit{seas},R_\mathit{peak}$) \cite{claesson_conductive_1988}:
\begin{equation} \label{eq:dT_b} \textstyle \Delta T_b(z, t) = \overline{q} * R_{LT}(z, t) + q_\mathit{seas} * R_\mathit{seas}(z,t) + q_\mathit{peak} * R_\mathit{peak}(z, t) \end{equation}
where
\begin{equation*} \overline{q} = \frac{t_{op}}{365*24} * q_\mathit{max}, \quad q_\mathit{seas} = w_\mathit{seas} * q_\mathit{max}, \quad q_\mathit{peak} = q_\mathit{max} - \overline{q} - q_\mathit{seas} \end{equation*}
and $w_\mathit{seas}$ is the seasonal system load, given as a dimensionless constant. Equation~\eqref{eq:dT_b} can be used to simulate the temperature variation at the borehole wall for any time $t$. For GSHP system sizing and to assess the potential long-term heat extraction from the ground, the worst-case $\Delta T_b$ and consequently the lowest $T_{mf}$ (or highest, for heat injection) must be calculated and substituted in Eqs.~\eqref{eq:T_mf_min} (heat extraction) or \eqref{eq:T_mf_max} (heat injection). To obtain $T_{mf}$, the temperature drop along the entire borehole of depth $H$ is of interest; this is indicated by using $H$ as the spatial argument of the thermal resistance. Using this notation, a combination of Eqs.~\eqref{eq:T_b}-\eqref{eq:dT_b} gives the following equation for the lowest/highest $T_{mf}$:
\begin{equation} \label{eq:T_mf} T_{mf}(t) = \textstyle T_g\left(\frac{H}{2}\right) - \overline{q} * R_{LT}(H, t_\mathit{dim}) - q_\mathit{seas} * R_\mathit{seas}(t_\mathit{seas}) - q_\mathit{peak} * R_\mathit{peak}(t_\mathit{peak}) - q_\mathit{max}*R_b \end{equation}
For reasons explained in Section~\ref{app:models}, only the $R_{LT}$ is a function of $H$. Equation \eqref{eq:T_mf} is valid for the cases of heat extraction (e.g. space heating) and heat injection (e.g. space cooling). The difference between the two modes of operation is that the heat extraction rates ($q$) are positive for heat extraction and negative for heat injection and that their magnitude varies with the length of the heating/cooling seasons and the respective demand. The principle of spatial superposition implies that the temperature drops due to the heat extraction from neighbouring BHEs are added to the temperature drop at the borehole wall.
%
As the seasonal and peak effects are of a short duration and have a penetration radius of less than the minimum borehole distance $B_\mathit{min} = 5$ m \cite{pahud_geothermal_2002}, the seasonal and peak components of neighbouring boreholes do not interfere with each other.
%
The long-term temperature drop ($\Delta T_{LT}$), however, may be impacted by surrounding boreholes. Fig.~\ref{fig:T_field} shows the long-term $\Delta T$ for different times, (a) at the borehole wall as a function of $z$ and (b) integrated along $z$ (denoted as $\overline{z}$ in Fig.~\ref{fig:T_field}b) as a function of distance to the borehole (logarithmic scale). Most of the temperature drop occurs during the first 10 years of operation, while the temperature drop after the planning horizon of 50 years is small.
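As a brief numerical illustration of the decomposition in Eq.~\eqref{eq:dT_b} (the values of $q_\mathit{max}$ and $w_\mathit{seas}$ are chosen purely for illustration and are not prescribed by the norm), taking $q_\mathit{max} = 40$ W/m, the norm operating time $t_{op} = 1850$ h and an assumed seasonal load $w_\mathit{seas} = 0.5$ yields
\begin{equation*}
\overline{q} = \frac{1850}{8760} \times 40 \approx 8.4 \ \mathrm{W/m}, \quad q_\mathit{seas} = 0.5 \times 40 = 20 \ \mathrm{W/m}, \quad q_\mathit{peak} = 40 - 8.4 - 20 \approx 11.6 \ \mathrm{W/m}.
\end{equation*}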
\begin{figure} \centering \includegraphics[width=.9\linewidth,trim={0 .6cm 0 .7cm},clip]{Figs/temp_field_FLS.pdf} \begin{subfigure}[t]{.42\textwidth} \centering \subcaption{} \label{figa:T_field} \end{subfigure} \begin{subfigure}[t]{.45\textwidth} \centering \subcaption{} \label{figb:T_field} \end{subfigure} \caption[Long-term variation of the ground temperature at the borehole wall as a function of depth and radial distance]{a) Long-term variation of the ground temperature at the borehole wall ($\Delta T_{LT}(r_b, z, t)$) as a function of depth ($z$), b) Integration of $\Delta T_{LT}(r_b, z, t)$ along $z$ ($\Delta T_{LT}(r, \overline{z}, t)$) as a function of distance to the BHE ($r$).} \label{fig:T_field} \end{figure}
In the literature, different variations of Eq.~\eqref{eq:T_mf} exist, which are typically formulated to yield the optimal borehole length for a specific heat load~\cite{kavanaugh_geothermal_2014,sia_sondes_2010}. A comprehensive overview of existing design methods for vertical closed-loop BHEs is provided by \citet{spitler_vertical_2016}.
\subsection{Analytical models for thermal resistance} \label{geo_models} \label{app:models}
In the model of \citet{claesson_conductive_1988}, the heat transfer of a BHE is modelled with good accuracy as a purely conductive process in a homogeneous medium. This implies that the thermal conductivity $\lambda$ of stratified ground with multiple layers may be approximated as a weighted average of the properties of the layers. \citet{claesson_conductive_1988} also argue that the undisturbed ground temperature $T_g(z)$ along the borehole can be approximated without loss of accuracy by the undisturbed ground temperature at half the borehole depth. To ease the notation for the computation of thermal resistances ($R$), \citet{eskilson_thermal_1987} introduced the concept of \textit{g-functions}. They are dimensionless step-response functions characterizing the thermodynamic behaviour of the ground, from which $R$ is obtained as \cite{eskilson_thermal_1987}:
\begin{equation} R = \frac{1}{2 \pi \lambda} \, \mathrm{g}\left( \frac{t}{t_s}, \frac{r}{H} \right), \quad \frac{t}{t_s} > 0 \end{equation}
The g-function is a function of the ratio between the time $t$ and the BHE's time constant $t_s$, as well as the ratio between the horizontal distance to the BHE center $r$ ($r_b$ for a single borehole) and the borehole depth $H$. The ratio $t/t_s$ may also be referred to as Eskilson's number (Es) \citep{pahud_geothermal_2002}. The time constant $t_s$, typically between $35-140$ years, is defined as:
\begin{equation} \label{eq:t_s} t_s = \frac{H^2}{9 \alpha} \end{equation}
The time constant marks the transition from the transient state, in which ground temperatures are decreasing logarithmically, to the steady state, which represents the new thermal equilibrium of the ground with a constant heat extraction (red lines in Fig.~\ref{fig:T_field}). Since the dimensioning horizon of BHEs lies typically below $t_s$, the transient state solutions for the heat transfer equation are of primary interest for the modelling of BHEs. A complete overview of transient and steady-state models is provided by \citet{pahud_geothermal_2002}.
\subsubsection{Finite Line Source (FLS)}
The BHE is modelled by \citet{claesson_conductive_1988} as a finite line source (FLS) of length $H$. The analytical model is a solution to the heat conduction equation that satisfies the boundary condition $T(r, z{=}0, t) = 0$ at the ground surface and the initial condition $T(r, z, t{=}0) = 0$.
The temperature changes along the BHE are modelled by the integral of the FLS solution along the vertical axis ($z$). A computationally efficient solution for this integral (in transient state) has been proposed by \citet{claesson_analytical_2011}: \begin{equation} \label{eq:FLS_int} g_{FLS}(r, H) = \frac{1}{2} \int_{\frac{1}{\sqrt{4 \alpha t}}}^{\infty} e^{- r^2 s^2} \ \frac{I_{ls}(Hs, Ds)}{H s^2} \ ds \end{equation} where \begin{equation*} I_{ls}(h, d) = 2\ \mathrm{ierf}(h) + 2\ \mathrm{ierf}(h + 2d) - \mathrm{ierf}(2h + 2d) - \mathrm{ierf}(2d) \end{equation*} \begin{equation*} \mathrm{ierf}(x) = \int_0^x \mathrm{erf}(u) du = x \ \mathrm{erf}(x) - \frac{1}{\sqrt{\pi}} (1 - e^{-x^2}) \qquad \mathrm{erf}(x) = \frac{2}{\sqrt{\pi}} \int_0^x e^{-\mu^2} d \mu \end{equation*} and $t$ equals the planning horizon $t_\mathit{dim}$. $D$ is the distance between the BHE outlet and the ground surface, which is set to $D = 2$ m as suggested in \cite{pahud_geothermal_2002}. The transient and steady-state solutions of the FLS model at any depth $z$ are provided in Appendix~\ref{app:allModels}. \subsubsection{Approximations and errors} The FLS can be simplified under certain assumptions by simpler models, namely the infinite line source model (ILS) and an asymptotic approximation of the FLS model, which are described in Appendix~\ref{app:allModels}. These models, used in the literature for example in \cite{alcaraz_advection_2016,alcaraz_t-i-ger_2017,casasso_g.pot:_2016,bayer_strategic_2014}, show a negligible deviation from the FLS model for short time spans ($< 1$ year) and are hence well-suited for modelling seasonal and peak effects (see Appendix~\ref{app:allModels} for details). % For larger time spans of several years, the deviation of the approximations from the FLS solution becomes non-negligible. This leads to a significant accumulation of errors when superposition is applied. Throughout this work, the FLS is hence used to model long-term thermal resistances, similar to other studies accounting for spatial superposition \cite{miglani_methodology_2018,rivera_increased_2017}. \subsubsection{Long-term, seasonal and peak effects} \label{seas_peak} Based on the analytical models described above, mathematical formulations for the thermal resistance of the peak, seasonal and long-term components shown in Fig.~\ref{fig:q_seasonal} ($R_{LT},R_\mathit{seas},R_\mathit{peak}$) can be derived. \textbf{Long-term effects} are assessed at $t = t_\mathit{dim}$ using the FLS solution for an accurate model of the spatial superposition \cite{miglani_methodology_2018,rivera_increased_2017}. Applying the principle of spatial superposition, the long-term resistance of borehole $i$ surrounded by $N$ boreholes may be expressed as (cf. \cite{claesson_analytical_2011}): \begin{equation} \label{eq:R_LT_superposed} R_{LT, i}(H) = \frac{1}{2 \pi \lambda} \mathrm{g}_{FLS}(r_b, H) + \frac{1}{2 \pi \lambda} \sum_{j=1}^N \mathrm{g}_{FLS}(r_{i,j}, H) \end{equation} where $i \neq j$, $\mathrm{g}(r, H)$ is obtained from Eq.~\eqref{eq:FLS_int} and $r_{i,j}$ denotes the distance between borehole $i$ and the surrounding borehole $j$. If a single borehole is considered, only the first term of the addition is relevant, yielding the definition of $R_{LT}$ used in Chapter~\ref{geothermal}. To ease the notation, the argument $t = t_\mathit{dim}$ has been omitted from Eq.~\eqref{eq:R_LT_superposed}. 
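The following is a minimal, self-contained numerical sketch of a single evaluation of Eq.~\eqref{eq:FLS_int} and of the single-borehole term of Eq.~\eqref{eq:R_LT_superposed}; it is \emph{not} the implementation used in this work, and the parameter values ($\lambda = 2.5$ W/mK, $\alpha = 10^{-6}$ m$^2$/s, $H = 100$ m, $r_b = 0.06$ m, $D = 2$ m, $t_\mathit{dim} = 50$ years) are illustrative mid-range choices from the tables above:
\begin{verbatim}
/* Sketch only: evaluates g_FLS (Eq. FLS_int) with a composite Simpson
 * rule and the long-term resistance of a single BHE, R_LT = g/(2 pi lambda).
 * Compile with: cc fls.c -lm                                              */
#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

static double ierf(double x)                  /* integral of erf(x)        */
{
    return x * erf(x) - (1.0 - exp(-x * x)) / sqrt(M_PI);
}

static double I_ls(double h, double d)        /* kernel of Eq. (FLS_int)   */
{
    return 2.0 * ierf(h) + 2.0 * ierf(h + 2.0 * d)
         - ierf(2.0 * h + 2.0 * d) - ierf(2.0 * d);
}

static double integrand(double s, double r, double H, double D)
{
    return exp(-r * r * s * s) * I_ls(H * s, D * s) / (H * s * s);
}

static double g_fls(double r, double H, double D, double alpha, double t)
{
    double a = 1.0 / sqrt(4.0 * alpha * t);   /* lower integration limit   */
    double b = 10.0 / r;                      /* integrand ~0 beyond this  */
    int n = 200000;                           /* even number of intervals  */
    double h = (b - a) / n;
    double sum = integrand(a, r, H, D) + integrand(b, r, H, D);
    for (int i = 1; i < n; i++)
        sum += (i % 2 ? 4.0 : 2.0) * integrand(a + i * h, r, H, D);
    return 0.5 * sum * h / 3.0;               /* 1/2 prefactor of Eq. (FLS_int) */
}

int main(void)
{
    double lambda = 2.5, alpha = 1.0e-6;      /* W/mK, m^2/s (illustrative) */
    double H = 100.0, D = 2.0, rb = 0.06;     /* m                          */
    double t = 50.0 * 365.25 * 24.0 * 3600.0; /* t_dim = 50 years in s      */
    double g = g_fls(rb, H, D, alpha, t);
    printf("g_FLS = %.3f, R_LT = %.4f mK/W\n", g, g / (2.0 * M_PI * lambda));
    return 0;
}
\end{verbatim}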
To efficiently compute $R_{LT}$ for a large number of BHE installations, the integrand in Eq.~\eqref{eq:FLS_int} is pre-computed in this work for a range of combinations of $\alpha$ and $r$, exploiting the geometrical properties of BHEs arranged in regular grids (see Chapter~\ref{geothermal}).
\textbf{Seasonal effects} are modelled in this work as a periodic heat extraction. Effects from neighbouring boreholes can be ignored if the minimum spacing ($B_\mathit{min}$) fulfills the following condition:
\begin{equation} B_\mathit{min} > 0.7 \sqrt{\alpha t_\mathit{seas}} \end{equation}
For the period of the seasonal variation ($t_\mathit{seas} = 1$ year), the thermal diffusivities in Switzerland (see Table~\ref{tab:phys_params}) yield $0.7 \sqrt{\alpha t_\mathit{seas}} \approx 3.5-4.5$ m. As the minimum spacing is defined as 5 m in the SIA norm \cite{sia_sondes_2010}, this criterion is fulfilled. The maximum periodic thermal resistance is given by \citep{claesson_conductive_1988, pahud_geothermal_2002}:
\begin{equation} R_\mathit{seas}(t_\mathit{seas}) = \frac{1}{2 \pi \lambda} \sqrt{\left(\ln(2/r_{pb}^\prime) - \gamma \right)^2 + \pi^2/16} \end{equation}
where
\begin{equation*} r_{pb}^\prime = r_b \sqrt{2}/\delta < 0.1, \quad \delta = \sqrt{ \alpha t_\mathit{seas} / \pi} \end{equation*}
and $\gamma$ is Euler's constant (0.5772). The $R_\mathit{seas}$ is based on the ILS model, and hence it is independent of the borehole depth $H$. The (horizontal) penetration depth of the temperature drop is denoted as $\delta$, which is around $3-4$ m for Switzerland. Seasonal effects hence do not impact adjacent BHEs.
\textbf{Short-term effects} are represented by a heat extraction pulse at $q_\mathit{max}$ for a duration $t_\mathit{peak}$, typically 1-10 days (see Table~\ref{tab:tech_design_params}). Due to the short duration of the pulse, the ILS approximation is valid with negligible error (see Appendix \ref{app:allModels}). These peak effects have an even smaller penetration depth than the seasonal effects, so no surrounding boreholes need to be considered. The thermal resistance can hence be obtained as:
\begin{equation} R_\mathit{peak}(t_\mathit{peak}) = \frac{1}{2 \pi \lambda} \ \mathrm{g}_{ILS}(r_b, t_\mathit{peak}) \end{equation}
where $\mathrm{g}_{ILS}$ is obtained from Appendix \ref{app:allModels}. While peak effects are relevant when modelling the operation of GSHP systems \cite{miglani_methodology_2018}, they are frequently neglected in studies of the long-term technical geothermal potential, as a back-up heating system is available in most cases to cover peak extractions that may violate the temperature constraint (Eq.~\eqref{eq:T_mf_min}). I follow this approach and neglect peak heat extraction throughout this work.
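To give a sense of the magnitudes involved in the seasonal term above, inserting illustrative mid-range Swiss values ($\lambda = 2.5$ W/mK, $\alpha = 10^{-6}$ m$^2$/s, $r_b = 0.06$ m) yields $\delta = \sqrt{\alpha t_\mathit{seas}/\pi} \approx 3.2$ m, $r_{pb}^\prime = r_b\sqrt{2}/\delta \approx 0.027 < 0.1$ and
\begin{equation*}
R_\mathit{seas} = \frac{1}{2\pi \times 2.5}\sqrt{\left(\ln(2/0.027) - 0.5772\right)^2 + \pi^2/16} \approx 0.24 \ \mathrm{mK/W}.
\end{equation*}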
{ "alphanum_fraction": 0.7562770906, "avg_line_length": 100.6276595745, "ext": "tex", "hexsha": "704a18f68ed42119231a7cad41ba17ff72788a5a", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "88a13da49d9a851e00e19f83914bb7709b78bb86", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "aw1513/EPFL_thesis_template", "max_forks_repo_path": "main/ch2_methods.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "88a13da49d9a851e00e19f83914bb7709b78bb86", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "aw1513/EPFL_thesis_template", "max_issues_repo_path": "main/ch2_methods.tex", "max_line_length": 940, "max_stars_count": null, "max_stars_repo_head_hexsha": "88a13da49d9a851e00e19f83914bb7709b78bb86", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "aw1513/EPFL_thesis_template", "max_stars_repo_path": "main/ch2_methods.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 20606, "size": 75672 }
\subsection{Faraday's Law of Induction \& Ampere's Law}
\noindent Faraday's Law of Induction quantifies the idea that a changing magnetic flux through a coil induces a current in the coil. More precisely, it induces a voltage (an electromotive force) around the coil, and the resulting current produces a magnetic field that opposes the change in flux that induced it (Lenz's Law). More formally,
\begin{equation*} -\frac{\partial}{\partial t}\iint\limits_{S}{\vec{B} \cdot \mathrm{d}\vec{s}} = \oint\limits_{C}{\vec{E} \cdot \mathrm{d}\vec{r}} \end{equation*}
\begin{equation*} -\iint\limits_{S}{\frac{\partial}{\partial t}\vec{B} \cdot \mathrm{d}\vec{s}} = \iint\limits_{S}{\nabla \times \vec{E} \cdot \mathrm{d}\vec{s}} \end{equation*}
by Stokes's Theorem.\\
As $S$ collapses to a point,
\begin{equation*} \nabla \times \vec{E} = -\frac{\partial}{\partial t}\vec{B}. \end{equation*}
This is Faraday's Law. It is the third of Maxwell's Equations.\\
\noindent The last of Maxwell's Equations is Ampere's Law (with Maxwell's correction). It says that
\begin{equation*} \nabla \times \vec{B} = \mu_0\epsilon_0\frac{\partial\vec{E}}{\partial t} + \mu_0\vec{J} \end{equation*}
where $\vec{J}$ is the current density, $\mu_0$ is the permeability of free space, and $\epsilon_0$ is the permittivity of free space. Using Maxwell's equations and some basic properties of waves, we can derive the speed of light as $c = \frac{1}{\sqrt{\mu_0\epsilon_0}}$.
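As a quick numerical check, substituting the SI values $\mu_0 = 4\pi\times10^{-7}\ \mathrm{T\cdot m/A}$ and $\epsilon_0 \approx 8.854\times10^{-12}\ \mathrm{F/m}$ gives
\begin{equation*} c = \frac{1}{\sqrt{\mu_0\epsilon_0}} \approx \frac{1}{\sqrt{(1.257\times10^{-6})(8.854\times10^{-12})}} \approx 3.00\times10^{8}\ \mathrm{m/s}, \end{equation*}
which is indeed the measured speed of light in vacuum.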
{ "alphanum_fraction": 0.7001445087, "avg_line_length": 62.9090909091, "ext": "tex", "hexsha": "9e3406f5223b817cbba2ae465aa04143b6479a66", "lang": "TeX", "max_forks_count": 10, "max_forks_repo_forks_event_max_datetime": "2021-08-17T15:21:12.000Z", "max_forks_repo_forks_event_min_datetime": "2020-04-10T05:41:17.000Z", "max_forks_repo_head_hexsha": "20a0efd79057a1f54e093b5021fbc616aab78c3f", "max_forks_repo_licenses": [ "Unlicense" ], "max_forks_repo_name": "aneziac/Math-Summaries", "max_forks_repo_path": "multiCalc/vectorAnalysis/faradaysLawInductionAmperesLaw.tex", "max_issues_count": 26, "max_issues_repo_head_hexsha": "20a0efd79057a1f54e093b5021fbc616aab78c3f", "max_issues_repo_issues_event_max_datetime": "2021-10-07T04:47:03.000Z", "max_issues_repo_issues_event_min_datetime": "2020-03-28T17:44:18.000Z", "max_issues_repo_licenses": [ "Unlicense" ], "max_issues_repo_name": "aneziac/Math-Summaries", "max_issues_repo_path": "multiCalc/vectorAnalysis/faradaysLawInductionAmperesLaw.tex", "max_line_length": 308, "max_stars_count": 39, "max_stars_repo_head_hexsha": "20a0efd79057a1f54e093b5021fbc616aab78c3f", "max_stars_repo_licenses": [ "Unlicense" ], "max_stars_repo_name": "aneziac/Math-Summaries", "max_stars_repo_path": "multiCalc/vectorAnalysis/faradaysLawInductionAmperesLaw.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-17T17:38:45.000Z", "max_stars_repo_stars_event_min_datetime": "2020-03-26T06:20:36.000Z", "num_tokens": 460, "size": 1384 }
\chapter{Computers}
We recommend that you skip this chapter until you use a computer to do physics calculations.
\section{Working with limited precision}
Computers can only store finitely many digits. Computers can only represent a \emph{finite} subset of \(\Real\). Computers can't represent most real numbers. The take-home message is: \emph{Mixing numbers with varying exponents increases errors}. To understand these phenomena, we have to understand how computers represent numbers.
\section{Understanding how computers represent numbers}
There are many ways: \UnorderedList{ \item floating-point numbers \item fixed-point numbers \item arbitrary-precision integers \item symbolic representations \item one's-complement signed integers \item two's-complement signed integers \item unsigned integers \item sign-magnitude }
The representation depends on the program. Each representation has its benefits and drawbacks. If you are doing physics with computers, then there's a high chance that the numbers you see are \emph{IEEE 754 double-precision floating-point numbers}, which are often shortened to \emph{doubles}.%
\footnote{\url{https://en.wikipedia.org/wiki/Double-precision_floating-point_format}}
\footnote{\url{https://en.wikipedia.org/wiki/Binary_number}}%
\footnote{\url{https://en.wikipedia.org/wiki/Signed_number_representations}}
\subsection{Understanding \emph{doubles}}
A \emph{double} represents a number in base-2 scientific notation as \( (-1)^s \times 1 . d_{51} d_{50} d_{49} \ldots d_2 d_1 d_0 \times 2^{p - 1023} \) Note that the first significant digit is always one and is not stored in memory. Each \(d_k\) is a binary digit (zero or one). Bit 63 is the sign bit \(s\): 0 means positive; 1 means negative. Bits 62\textendash{}52 (11 bits) are the \emph{biased exponent} \(p\). Bits 51\textendash{}0 (52 bits) are called the \emph{significand} or \emph{mantissa}. Bit 51 is \(d_{51}\), and so on; bit 0 is \(d_0\).
\subsection{Understanding the problem with doubles}
If a fraction has a denominator that is not a power of two, then a double can't represent the fraction exactly. For example, a double can't represent even a simple fraction such as \(1/3\) exactly because the base-2 expansion of \(1/3\) doesn't terminate, in the same way that the decimal expansion of \(1/3\) (\(0.333\ldots\)) doesn't terminate. A double has only 53 significant binary digits. A multiplication of two 53-digit numbers may produce up to a 106-digit number. The result is rounded to 53 digits; thus at most 53 digits are lost. There is also a website\footnote{\url{http://floating-point-gui.de/}} that explains the issue.
\subsection{Understanding computer algebra systems}
Computer algebra systems can represent numbers like \(\sqrt{2}\) and \(1/3\) exactly because they do not represent numbers as strings of digits. A computer algebra system stores \(\sqrt{2}\) as something like \verb@(sqrt 2)@. It does not evaluate \(\sqrt{2}\) to \(1.4142\ldots\) before storing it.
\subsection{Understanding ulp: units of least precision}
The standard is IEEE 754. For example, in IEEE 754 double-precision floating-point arithmetic, \( 2^{53} + 1 = 2^{53} \). In some browsers, you can verify this. Press Ctrl+Shift+J to open the console, and then enter \verb@Math.pow(2,53)@, and then enter \verb@Math.pow(2,53)+1@, and see that they give the same number.
\section{Solving a system of linear equations}
You can use GNU Octave. To solve \(A x = C\) for \(x\), use left division:
\begin{verbatim}
A \ C
\end{verbatim}
This also works for Matlab.
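For example, if \(A\) encodes the system \(2u + v = 5\), \(u + 3v = 10\) and \(C\) holds the right-hand sides 5 and 10, then \verb@A \ C@ returns the solution \(u = 1\), \(v = 3\).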
\section{Related fields of study} Related fields of study are \emph{scientific computing} (also known as \emph{computational science}) and \emph{numerical analysis}.% \footnote{\url{https://en.wikipedia.org/wiki/Computational_science}}% \footnote{\url{https://en.wikipedia.org/wiki/Numerical_analysis}} \section{Using an equation to program a computer}
{ "alphanum_fraction": 0.7613402062, "avg_line_length": 34.3362831858, "ext": "tex", "hexsha": "6d598b75e104854604f53e015712185f75809ac1", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2018-10-02T15:20:22.000Z", "max_forks_repo_forks_event_min_datetime": "2018-10-02T15:20:22.000Z", "max_forks_repo_head_hexsha": "df55868caa436efc631e145a43e833220b8da1d0", "max_forks_repo_licenses": [ "Apache-2.0", "CC0-1.0" ], "max_forks_repo_name": "edom/work", "max_forks_repo_path": "research/physics/computer.tex", "max_issues_count": 4, "max_issues_repo_head_hexsha": "df55868caa436efc631e145a43e833220b8da1d0", "max_issues_repo_issues_event_max_datetime": "2022-02-16T00:55:32.000Z", "max_issues_repo_issues_event_min_datetime": "2020-12-02T18:37:37.000Z", "max_issues_repo_licenses": [ "Apache-2.0", "CC0-1.0" ], "max_issues_repo_name": "edom/work", "max_issues_repo_path": "research/physics/computer.tex", "max_line_length": 113, "max_stars_count": null, "max_stars_repo_head_hexsha": "df55868caa436efc631e145a43e833220b8da1d0", "max_stars_repo_licenses": [ "Apache-2.0", "CC0-1.0" ], "max_stars_repo_name": "edom/work", "max_stars_repo_path": "research/physics/computer.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1004, "size": 3880 }
\subsection{Gradient Properties} \noindent Let $f$ and $g$ be functions of multiple variables, let $\vec{r}$ be a VVF, and let $c \in \mathbb{R}$. \begin{enumerate} \item $\nabla(f \pm g) = \nabla f \pm \nabla g$ \item $\nabla(cf) = c\nabla f$ \item $\nabla(fg) = f\nabla g + g\nabla f$ \item $\frac{d}{dt}\left(f\circ\vec{r}\right)(t) = \nabla f(\vec{r}(t)) \cdot \vec{r}\,'(t)$ \end{enumerate}
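For example, applying property 4 to $f(x, y) = x^2 + y^2$ and $\vec{r}(t) = (\cos t, \sin t)$ gives
\begin{equation*} \frac{d}{dt}f(\vec{r}(t)) = \nabla f(\vec{r}(t)) \cdot \vec{r}\,'(t) = (2\cos t, 2\sin t) \cdot (-\sin t, \cos t) = 0, \end{equation*}
which is consistent with $f(\vec{r}(t)) = \cos^2 t + \sin^2 t = 1$ being constant along the curve.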
{ "alphanum_fraction": 0.6213333333, "avg_line_length": 41.6666666667, "ext": "tex", "hexsha": "56202a671f00cb0b8fb103e9b3baa542fbe2e679", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "3ad58ef55c176f7ebaf145144e0a4eb720ebde86", "max_forks_repo_licenses": [ "Unlicense" ], "max_forks_repo_name": "rawsh/Math-Summaries", "max_forks_repo_path": "multiCalc/differentialMultivariableCalculus/gradientProperties.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "3ad58ef55c176f7ebaf145144e0a4eb720ebde86", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Unlicense" ], "max_issues_repo_name": "rawsh/Math-Summaries", "max_issues_repo_path": "multiCalc/differentialMultivariableCalculus/gradientProperties.tex", "max_line_length": 104, "max_stars_count": null, "max_stars_repo_head_hexsha": "3ad58ef55c176f7ebaf145144e0a4eb720ebde86", "max_stars_repo_licenses": [ "Unlicense" ], "max_stars_repo_name": "rawsh/Math-Summaries", "max_stars_repo_path": "multiCalc/differentialMultivariableCalculus/gradientProperties.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 156, "size": 375 }
%
%% Copyright (c) Members of the EGEE Collaboration. 2004-2010.
%% See http://www.eu-egee.org/partners for details on the copyright holders.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%
% -*- mode: latex -*-
\section{Introduction}
This document serves as a developer's guide and could be seen as an API reference too, even though comments in the header files may give the reader better insights into that matter.
Common Authentication Library (\CANL for short) was designed to provide common security layer support in grid applications. It is largely based on existing code (VOMS, LB). Its simple API can be divided by functionality into two parts:
\begin{itemize}
\item \textit{\CANL Main API} is used to establish a (secure) client-server connection with one or both sides authenticated, and to send or receive data. As will be described in~\ref{s:cs-auth-conn}, most of the \textit{Main API} is not directly dependent on some chosen cryptography toolkit (SSL implementation). It is also internally plugin-based, and therefore support for other security mechanisms can be added in the future.
\item \textit{\CANL Certificate API} allows certificate and proxy management, \eg proxy creation, signing, etc. We may think of the \textit{Certificate API} as the second level of the \textit{Main API}.
\end{itemize}
Currently there is an EMI Product Team assigned to \CANL development, with three subgroups, one for each language binding.
\subsection{Language Bindings}
\CANL is developed in C as well as in C++ and Java language bindings; however, this document covers only the C interface.
\subsection{Getting and Building Library}
TODO: package names. External dependencies:
\begin{itemize}
\item c-ares -- asynchronous resolver library
\item openssl -- cryptography and SSL/TLS toolkit
\end{itemize}
\subsection{General Guidelines}
\marginpar{Naming conventions}%
All function names are prefixed with \verb'canl_'.
\marginpar{Input and output arguments}%
All structures and objects passed in output of functions (even though pointers are used as a help) are dynamically allocated, so the proper functions to free the allocated memory have to be called, e.g. \verb'canl_free_ctx()' deallocates members of the structure \verb'canl_ctx'.
\marginpar{Opaque types}%
Almost all types used in caNl are \textit{Opaque types} -- i.e. their structure is not exposed to users. To use and/or modify these structures, an API call has to be used. An example of an opaque type is {\tt canl\_ctx}.
\marginpar{Return values}%
The return type of most of the API functions is {\tt canl\_err\_code}, which in most cases can be interpreted as int. Unless specified otherwise, a zero return value means success, non-zero failure. Standard error codes from {\tt errno.h} are used as much as possible. A few API functions return {\tt char *}. In such a~case, {\tt NULL} indicates an error and a non-null value means success.
\subsection{Context and Parameter Settings}
\label{s:context}
All the API functions use a \emph{context} parameter of type {\tt canl\_ctx} to maintain state information like error message and code.
Some API functions also use an \emph{io context} of type {\tt canl\_io\_handler}, which keeps information about each particular connection (\eg socket number, oid, SSL context). The caller can create as many contexts as needed; all of them will be independent.
When calling \verb'canl_create_ctx()' or \verb'canl_create_io_handler()', all members of the objects are initialized with default values, which are often NULL for pointer types and 0 for int and similar types.
\section{\CANL Components}
\label{s:common}
\subsection{Header Files}
Header files for the common structures and functions are summarized in table~\ref{t:cheaders}.
\begin{table}[h]
\begin{tabularx}{\textwidth}{>{\tt}lX}
canl.h & Definition of context objects and \textit{Main API} common function declarations. \\
canl\_ssl.h & Declaration of functions that use the X509 certificate-based authentication mechanism (pretty much dependent on openssl library functions).\\
canl\_cred.h & Definition of context objects of the \textit{Certificate API} and function declarations.\\
\end{tabularx}
\caption{Header files}
\label{t:cheaders}
\end{table}
\subsection{Building Client Programs}
The easiest way to build programs using \CANL in C is to use GNU's libtool to take care of all the dependencies:
\begin{verbatim}
libtool --mode=compile gcc -c example1.c -D_GNU_SOURCE
libtool --mode=link gcc -o example1 example1.o -lcanl_c
\end{verbatim}
\subsection{Context}
\label{s:canl_ctx}
\marginpar{Context initialization}%
There are two opaque data structures representing caNl \textit{Main API} context: {\tt canl\_ctx} and {\tt canl\_io\_handler} (see section~\ref{s:context}). {\tt canl\_ctx} must be initialized before any caNl API call. {\tt canl\_io\_handler} must be initialized before calling any function representing an io operation (\eg \verb'canl_io_connect()') and after {\tt canl\_ctx} initialization.
\begin{lstlisting}
#include <canl.h>
#include <canl_ssl.h>

canl_io_handler my_io_h = NULL;
canl_ctx my_ctx;

my_ctx = canl_create_ctx();
err = canl_create_io_handler(my_ctx, &my_io_h);
\end{lstlisting}
There is one opaque data structure representing the \CANL \textit{Certificate API} context: {\tt canl\_cred}. It must only be initialized before function calls that use this context as a parameter.
\begin{lstlisting}
#include <canl.h>
#include <canl_cred.h>

canl_ctx ctx;
canl_cred c_cred;

ctx = canl_create_ctx();
canl_cred_new(ctx, &c_cred);
\end{lstlisting}
\marginpar{Obtaining error description}%
{\tt canl\_ctx} stores details of all errors which have occurred since context initialization, in human readable format. To obtain the error description, use \verb'canl_get_error_message()':
\begin{lstlisting}
printf("%s\n", canl_get_error_message(my_ctx));
\end{lstlisting}
\marginpar{Context deallocation}%
It is recommended to free the memory allocated to each context once it is not needed anymore; in the case of the \textit{Main API}, first the {\tt canl\_io\_handler}, then the {\tt canl\_ctx}:
\begin{lstlisting}
if (my_io_h)
    canl_io_destroy(my_ctx, my_io_h);
canl_free_ctx(my_ctx);
\end{lstlisting}
and for the \textit{Certificate API}:
\begin{lstlisting}
canl_cred_free(ctx, c_cred);
\end{lstlisting}
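Putting the above calls together, a minimal \textit{Main API} skeleton (using only the functions already introduced in this section; the actual connection and data transfer calls are omitted) could look like this:
\begin{lstlisting}
#include <canl.h>
#include <canl_ssl.h>
#include <stdio.h>

int main(void)
{
    canl_ctx my_ctx;
    canl_io_handler my_io_h = NULL;
    canl_err_code err;

    /* the context must be created before any other caNl call */
    my_ctx = canl_create_ctx();

    err = canl_create_io_handler(my_ctx, &my_io_h);
    if (err)
        printf("%s\n", canl_get_error_message(my_ctx));

    /* ... establish a connection, send or receive data ... */

    /* free the io handler first, then the context */
    if (my_io_h)
        canl_io_destroy(my_ctx, my_io_h);
    canl_free_ctx(my_ctx);
    return 0;
}
\end{lstlisting}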
{ "alphanum_fraction": 0.7738500222, "avg_line_length": 37.5611111111, "ext": "tex", "hexsha": "bdd6aeecc39842d22c619015bacad5a897e458b8", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-09-16T09:51:04.000Z", "max_forks_repo_forks_event_min_datetime": "2021-09-16T09:51:04.000Z", "max_forks_repo_head_hexsha": "a0ce6b7dd87cfee7c2cc9b2e2b16d0961aef58da", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "ellert/canl-c", "max_forks_repo_path": "doc/src/canl-introduction.tex", "max_issues_count": 4, "max_issues_repo_head_hexsha": "a0ce6b7dd87cfee7c2cc9b2e2b16d0961aef58da", "max_issues_repo_issues_event_max_datetime": "2021-05-28T13:01:12.000Z", "max_issues_repo_issues_event_min_datetime": "2015-05-04T07:03:29.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "ellert/canl-c", "max_issues_repo_path": "doc/src/canl-introduction.tex", "max_line_length": 83, "max_stars_count": 1, "max_stars_repo_head_hexsha": "a0ce6b7dd87cfee7c2cc9b2e2b16d0961aef58da", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "ellert/canl-c", "max_stars_repo_path": "doc/src/canl-introduction.tex", "max_stars_repo_stars_event_max_datetime": "2015-03-13T17:40:38.000Z", "max_stars_repo_stars_event_min_datetime": "2015-03-13T17:40:38.000Z", "num_tokens": 1693, "size": 6761 }
%!TEX root = Calculus_I.tex
\chapter{Key Concept: The Definite Integral}
\section{How Do We Measure Distance Traveled?}
The \textbf{\textit{distance}} formula is commonly written as:
%
\begin{equation} \text{Distance} = \text{Velocity} \cdot \text{Time} \end{equation}
%
Recall that \textbf{\textit{velocity}} has both magnitude \textit{and} direction. Thus, it is important to know what direction indicates positive \textbf{\textit{velocity}}. This section will estimate \textbf{\textit{distance}} when \textbf{\textit{velocity}} is time-varying.
\vspace{0.1in}
When estimating the \textbf{\textit{distance}} traveled, it is important to know how often the \textbf{\textit{velocity}} measurements are taken. For example, consider this table with measurements taken every 2 seconds:
%
\begin{table} \begin{center} \begin{tabular}{ccccccc} \hline Time (sec) & 0 & 2 & 4 & 6 & 8 & 10\\ \hline Velocity $\left(\frac{\text{ft}}{\text{s}}\right)$ & 20 & 30 & 38 & 44 & 48 & 50\\ \hline \end{tabular} \end{center} \end{table}
%
Higher-frequency \textbf{\textit{velocity}} measurements result in less \textbf{\textit{distance}} estimation error, but an estimate can still be made. Using the velocity at the beginning of each 2-second interval results in:
%
\begin{equation} 20\cdot2 + 30\cdot2 + 38\cdot2 + 44\cdot2 + 48\cdot2 = 360 \ \text{feet} \end{equation}
%
This serves as a lower limit because, with increasing velocity, the car travels at least as fast as the measured speed throughout each interval. An upper limit assumes instead that the car travels throughout each interval at the velocity of the next measurement, yielding:
%
\begin{equation} 30\cdot2 + 38\cdot2 + 44\cdot2 + 48\cdot2 + 50\cdot2 = 420 \ \text{feet} \end{equation}
%
Thus, we can conclude that:
%
\begin{equation} 360 \leq \text{Total Distance Traveled} \leq 420 \ \text{feet} \end{equation}
%
A smaller difference between the upper and lower estimates can be obtained by increasing the measurement frequency. Each time interval between measurements can be represented by a rectangle on the \textit{Time-Velocity}-axes. As the time intervals get smaller, the rectangles become thinner. In the \textbf{\textit{limit}} as the time interval approaches zero, the rectangles become infinitesimally thin, and the difference between the lower and upper estimates approaches zero. It will be shown later that, in the \textbf{\textit{distance}} and \textbf{\textit{velocity}} relationship, the \textbf{\textit{area under the curve}} on the \textit{Time-Velocity}-axes is equivalent to the \textbf{\textit{total distance}} traveled, if \textbf{\textit{velocity}} is strictly positive.
\vspace{0.1in}
If \textbf{\textit{velocity}} is ever negative, then the object is traveling back towards the starting position. Thus, its \textbf{\textit{distance}} from the starting position is decreasing, but the \textbf{\textit{total distance}} traveled is increasing.
\vspace{0.1in}
In the general case, let $v = f(t)$ be a non-negative \textbf{\textit{velocity}} function, $t \geq 0$. One may wish to determine the \textbf{\textit{distance}} traveled between times $a$ and $b$. Measurements are taken at evenly spaced times, $t_0$, $t_1$, \ldots, $t_n$.
If $a = t_0$ and $b = t_n$, then the time interval between any two measurements is given by:
%
\begin{equation}
	\Delta t = \frac{b - a}{n}
\end{equation}
%
Over the $i$-th time interval, the \textbf{\textit{distance}} traveled is approximately:
%
\begin{equation}
	\text{Distance} \approx f\left(t_i\right)\Delta t
\end{equation}
%
Summing these distances over all of the intervals between $a$ and $b$ yields:
%
\begin{equation}
	\label{eq:LHSum}
	\text{Distance} \approx \sum_{i=0}^{n-1} f\left(t_i\right)\Delta t
\end{equation}
%
This is a \textbf{\textit{Left-Hand Sum}} because it uses the velocity measured at the left endpoint of each rectangular interval. The \textbf{\textit{Right-Hand Sum}} can be written as:
%
\begin{equation}
	\label{eq:RHSum}
	\text{Distance} \approx \sum_{i=1}^n f\left(t_i\right)\Delta t
\end{equation}
%
If $f$ is an \textbf{\textit{increasing function}}, then the \textbf{\textit{Left-Hand Sum}} underestimates the \textbf{\textit{total distance}} and the \textbf{\textit{Right-Hand Sum}} overestimates it. Conversely, if $f$ is \textbf{\textit{decreasing}}, then the \textbf{\textit{Left-Hand Sum}} overestimates the \textbf{\textit{total distance}} and the \textbf{\textit{Right-Hand Sum}} underestimates it. For a \textbf{\textit{monotonically increasing}} or \textbf{\textit{monotonically decreasing}} function, the difference between the two estimates, which bounds the error of each, is given by:
%
\begin{equation}
	\text{Error} = |f(b) - f(a)| \cdot \Delta t
\end{equation}
%
\begin{center}
	\section*{\small Examples}
	Coming soon$!^{\text{TM}}$
\end{center}

\section{The Definite Integral}
The \textbf{\textit{Definite Integral}} is defined by taking the \textbf{\textit{limit}} of the \textbf{\textit{Left-Hand Sum}} or \textbf{\textit{Right-Hand Sum}} as the parameter $n$ approaches $+\infty$, provided the function $f(x)$ is \textbf{\textit{continuous}} on $[a, b]$. This can be written as:
%
\begin{equation}
	\int_a^b f(x)dx
\end{equation}
%
The summations represented by Equations (\ref{eq:LHSum}) and (\ref{eq:RHSum}) are referred to as \textbf{\textit{Riemann Sums}}. The \textbf{\textit{integrand}} is the function being integrated, $f(x)$, and the \textbf{\textit{limits of integration}} are the endpoints of the interval, $a$ and $b$.

\vspace{0.1in}

More specifically, Equations (\ref{eq:LHSum}) and (\ref{eq:RHSum}) are special cases of \textbf{\textit{Riemann Sums}}. The general form of the \textbf{\textit{Riemann Sum}} for a function, $f(x), x \in [a, b]$, is given by:
%
\begin{equation}
	\sum_{i=1}^n f\left(c_i\right) \Delta x_i
\end{equation}
%
where $a = x_0 < x_1 < \dots < x_n = b$ and, for $i = 1, 2, \dots, n$, $\Delta x_i = x_i - x_{i-1}$ and $x_{i-1} \leq c_i \leq x_i$.

\vspace{0.1in}

The \textbf{\textit{Riemann Sum}} approximates the area between the curve and the $x$-axis by summing the areas of $n$ rectangles; the \textbf{\textit{Definite Integral}} is the limit of this approximation. When the \textbf{\textit{integrand}} is negative, the rectangles lie below the $x$-axis, and because a positive sign convention is used for area above the axis, the area found through \textbf{\textit{integration}} over such a region is negative. This is what causes \textbf{\textit{integrations}} such as $\int_0^{2\pi} \sin x$ $dx$ to equal zero.
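As a quick numerical illustration of how these sums behave (a short sketch in plain Python, added here only for illustration and not part of the text's exercises), the code below computes left- and right-hand sums for the velocity table of the previous section and for $f(x) = \sin x$ on $[0, 2\pi]$, where the signed areas cancel:

\begin{verbatim}
# Sketch: left- and right-hand Riemann sums (illustration only).

def left_sum(values, dt):
    # Left-hand sum: use the measurement at the start of each interval.
    return sum(values[:-1]) * dt

def right_sum(values, dt):
    # Right-hand sum: use the measurement at the end of each interval.
    return sum(values[1:]) * dt

# Velocity table from the previous section: measurements every 2 seconds.
velocities = [20, 30, 38, 44, 48, 50]
print(left_sum(velocities, 2), right_sum(velocities, 2))   # 360 420

# Riemann sums for f(x) = sin(x) on [0, 2*pi]: both approach 0 as n grows,
# because the negative area on [pi, 2*pi] cancels the positive area on [0, pi].
import math
n = 1000
a, b = 0.0, 2 * math.pi
dx = (b - a) / n
samples = [math.sin(a + i * dx) for i in range(n + 1)]
print(left_sum(samples, dx), right_sum(samples, dx))       # both approximately 0
\end{verbatim}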
\begin{center}
	\section*{\small Examples}
	Coming soon$!^{\text{TM}}$
\end{center}

\section{The Fundamental Theorem and Interpretations}
The \textbf{\textit{Fundamental Theorem of Calculus}} is written as:

\vspace{0.2in}

If $f$ is \textbf{\textit{continuous}} on $[a, b]$, and $f(x) = F'(x)$, then:
%
\begin{equation}
	\int_a^b f(x)dx = F(b) - F(a)
\end{equation}
%
Thus, if a function $f$ is equal to the \textbf{\textit{rate of change}} of a quantity, then the \textbf{\textit{definite integral}} results in the total change.

\vspace{0.1in}

The \textbf{\textit{integral}} can also be used to compute the average value of a function, $f$, over a given interval, $[a, b]$:
%
\begin{equation}
	\text{Average Value of } f = \frac{1}{b-a}\int_a^bf(x)dx
\end{equation}
%
\vspace{0.1in}

Lastly, the \textbf{\textit{Fundamental Theorem of Calculus}} can be used to compute \textbf{\textit{definite integrals}} exactly.

\begin{center}
	\section*{\small Examples}
	Coming soon$!^{\text{TM}}$
\end{center}

\section{Theorems About Definite Integrals}
So far, we have only considered the \textbf{\textit{Definite Integral}} when $a < b$. Recall that:
%
\begin{equation}
	\int_a^b f(x)dx = \lim_{n \rightarrow \infty} \sum_{i=1}^n f\left(x_i\right) \Delta x
\end{equation}
%
Then, provided $f(x)$ is \textbf{\textit{continuous}}, for any numbers $a$, $b$, and $c$:
%
\begin{enumerate}
	\item $\int_b^a f(x)$ $dx = - \int_a^b f(x)$ $dx$\\
	\item $\int_a^c f(x)$ $dx + \int_c^b f(x)$ $dx = \int_a^b f(x)$ $dx$
\end{enumerate}
%
The first result can be derived from the definition of $\Delta x$, namely:
%
\begin{equation}
	\Delta x = \frac{(a - b)}{n} = -\frac{(b - a)}{n}
\end{equation}
%
The second result holds because the upper limit of integration of the first integral equals the lower limit of integration of the second, $c$, so the two sums join into a single sum over $[a, b]$.

\vspace{0.1in}

We can also state properties of \textbf{\textit{integrals}} involving multiple functions. Suppose $f$ and $g$ are both continuous functions, and $c$ is an arbitrary constant. Then:
%
\begin{enumerate}
	\item $\int_a^b \left(f(x) \pm g(x)\right)$ $dx = \int_a^b f(x)$ $dx \pm \int_a^b g(x)$ $dx$\\
	\item $\int_a^b c \cdot f(x)$ $dx = c\int_a^b f(x)$ $dx$
\end{enumerate}
%
These properties hold because of the \textbf{\textit{Principle of Superposition}} (the integral of a sum is the sum of the integrals) and because $c$ is simply a scaling factor.

\vspace{0.1in}

The area between curves can also be calculated, provided $f(x)$ lies above $g(x)$ for $a \leq x \leq b$:
%
\begin{equation}
	\text{Area between $f$ and $g$} = \int_a^b\left(f(x) - g(x)\right)dx
\end{equation}
%
\vspace{0.1in}

Symmetry can also be used to aid in the evaluation of \textbf{\textit{integrals}}. For \textbf{\textit{Even Functions}}:
%
\begin{equation}
	\int_{-a}^a f(x)dx = 2\int_0^a f(x)dx
\end{equation}
%
and for \textbf{\textit{Odd Functions}}:
%
\begin{equation}
	\int_{-a}^a f(x)dx = 0
\end{equation}
%
This is because of the definition of \textbf{\textit{Even}} and \textbf{\textit{Odd Functions}}.

\begin{center}
	\section*{\small Examples}
	Coming soon$!^{\text{TM}}$
\end{center}
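In the meantime, a quick numerical check of the symmetry properties above (a sketch in plain Python; the particular functions $x^2$ and $x^3$ are arbitrary examples chosen for illustration):

\begin{verbatim}
# Sketch: numerically checking the even/odd symmetry properties
# with a simple midpoint Riemann sum (illustration only).

def midpoint_sum(f, a, b, n=100000):
    dx = (b - a) / n
    return sum(f(a + (i + 0.5) * dx) for i in range(n)) * dx

a = 3.0
even = lambda x: x ** 2    # even function
odd = lambda x: x ** 3     # odd function

print(midpoint_sum(even, -a, a), 2 * midpoint_sum(even, 0, a))  # both close to 18
print(midpoint_sum(odd, -a, a))                                 # close to 0
\end{verbatim}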
\subsubsection{Gaussian Integral}
\noindent
Let's compute $\iint\limits_{D}{e^{-x^2-y^2}\mathrm{d}A}$ where $D$ is the unit disk.
\begin{equation*}
	A = \int_{-1}^{1}{\int_{-\sqrt{1 - x^2}}^{\sqrt{1 - x^2}}{e^{-x^2 - y^2}\mathrm{d}y}\mathrm{d}x} = \int_{0}^{1}{\int_{0}^{2\pi}{e^{-r^2}r\mathrm{d}\theta}\mathrm{d}r} = \int_{0}^{1}{re^{-r^2}\mathrm{d}r} \cdot \int_{0}^{2\pi}{\mathrm{d}\theta}.
\end{equation*}
Let $u = -r^2$, $\mathrm{d}u = -2r\mathrm{d}r$, so that $r\mathrm{d}r = -\frac{1}{2}\mathrm{d}u$.\\
\begin{align*}
	&= \frac{1}{2}\int_{-1}^{0}{e^u\mathrm{d}u} \cdot 2\pi \\
	&= \pi\left(1 - \frac{1}{e}\right).
\end{align*}
Now, let's have $D = \mathbb{R}^2$. This is the famous Gaussian Integral.
\begin{align*}
	&= \int_{0}^{\infty}{\int_{0}^{2\pi}{re^{-r^2}\mathrm{d}\theta}\mathrm{d}r} \\
	&= \int_{0}^{\infty}{re^{-r^2}\mathrm{d}r} \cdot \int_{0}^{2\pi}{\mathrm{d}\theta}.
\end{align*}
Let $u = -r^2$, $\mathrm{d}u = -2r\mathrm{d}r$.
\begin{align*}
	&= -\pi\int_{0}^{-\infty}{e^{u}\mathrm{d}u} \\
	&= -\pi\left(\left(\lim_{a\to -\infty}{e^{a}}\right) - e^0\right) \\
	&= -\pi\left(0 - 1\right) \\
	&= \pi
\end{align*}
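Since $e^{-x^2-y^2} = e^{-x^2}e^{-y^2}$, the double integral over $\mathbb{R}^2$ factors into a product of two identical single integrals, which recovers the one-dimensional form of the Gaussian integral:
\begin{equation*}
	\pi = \iint\limits_{\mathbb{R}^2}{e^{-x^2-y^2}\mathrm{d}A} = \left(\int_{-\infty}^{\infty}{e^{-x^2}\mathrm{d}x}\right)\left(\int_{-\infty}^{\infty}{e^{-y^2}\mathrm{d}y}\right) = \left(\int_{-\infty}^{\infty}{e^{-x^2}\mathrm{d}x}\right)^2,
\end{equation*}
so $\int_{-\infty}^{\infty}{e^{-x^2}\mathrm{d}x} = \sqrt{\pi}$.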
\section{closed channel transfer matrix folding}

Now that we have a matrix of size $(N+N_c)\times(N+N_c)$ it would be convenient to be able to represent it as a matrix of size $N \times N$. We will call this reduction ``folding'' as we are folding the closed channels into a smaller matrix. Information is conserved in this matrix of reduced size, at the cost of messier matrix elements. The folding operation occurs one closed channel at a time, but a recursion relation shows that the operation can be carried out by induction for an infinite number of closed channels. Bagwell does the operation but gives an incorrect recursion relation; see~\cite{1990_Bagwell}, page 358, equation 25.

Dealing with a $4\times 4$ matrix (2 open and 2 closed channels), solve for $t_{24}$ in the fourth equation:
\begin{equation}
t_{24} = -\frac{\Gamma_{41}}{\Gamma_{44}+2\kappa_4} t_{21} - \frac{\Gamma_{42}}{\Gamma_{44}+2\kappa_4} t_{22} - \frac{\Gamma_{43}}{\Gamma_{44}+2\kappa_4} t_{23}
\end{equation}
Then plug that into the three remaining equations and group like terms,
\begin{equation}
0 = \left(\left(\Gamma_{11}-\frac{\Gamma_{14}\Gamma_{41}}{\Gamma_{44}+2 \kappa_4}\right) - 2 i k_1\right)t_{21} + \left(\Gamma_{12}-\frac{\Gamma_{14}\Gamma_{42}}{\Gamma_{44}+2 \kappa_4}\right)t_{22} + \left(\Gamma_{13}-\frac{\Gamma_{14}\Gamma_{43}}{\Gamma_{44}+2 \kappa_4}\right)t_{23}
\end{equation}
Now we can write a $3\times 3$ matrix equation,
\begin{equation}
\left( \begin{array}{c} 0 \\ -2 i k_2 \\ 0 \end{array} \right) =
\left( \begin{array}{ccc}
(\Gamma_{11}-\frac{\Gamma_{14}\Gamma_{41}}{\Gamma_{44}+2 \kappa_4})-2 i k_1 & (\Gamma_{12}-\frac{\Gamma_{14}\Gamma_{42}}{\Gamma_{44}+2 \kappa_4}) & (\Gamma_{13}-\frac{\Gamma_{14}\Gamma_{43}}{\Gamma_{44}+2 \kappa_4}) \\
(\Gamma_{21}-\frac{\Gamma_{24}\Gamma_{41}}{\Gamma_{44}+2 \kappa_4}) & (\Gamma_{22}-\frac{\Gamma_{24}\Gamma_{42}}{\Gamma_{44}+2 \kappa_4})-2 i k_2 & (\Gamma_{23}-\frac{\Gamma_{24}\Gamma_{43}}{\Gamma_{44}+2 \kappa_4}) \\
(\Gamma_{31}-\frac{\Gamma_{34}\Gamma_{41}}{\Gamma_{44}+2 \kappa_4}) & (\Gamma_{32}-\frac{\Gamma_{34}\Gamma_{42}}{\Gamma_{44}+2 \kappa_4}) & (\Gamma_{33}-\frac{\Gamma_{34}\Gamma_{43}}{\Gamma_{44}+2 \kappa_4})+2 \kappa_3
\end{array} \right)
\left( \begin{array}{c} t_{21} \\ t_{22} \\ t_{23} \end{array} \right)
\label{singlescattererfirstfold}
\end{equation}
Observe the recursion relation
\begin{equation}
\Gamma_{ij,4} = \Gamma_{ij} - \frac{\Gamma_{i4}\Gamma_{4j}}{\Gamma_{44}+2 \kappa_4}
\end{equation}
which generalizes to the recursion relation
\begin{equation}
\Gamma_{ij}^{(n)} = \Gamma_{ij}^{(n+1)} - \frac{\Gamma_{i(n+1)}^{(n+1)} \Gamma_{(n+1)j}^{(n+1)}}{\Gamma_{(n+1)(n+1)}^{(n+1)}+2 \kappa_{(n+1)}}
\end{equation}
Things to keep in mind: multiplying folded matrices is not equivalent to multiplying large matrices and then folding. This recursion relation demonstrates that an infinite number of closed channels can be accounted for (with the proper normalization).
% see Ben's notes, 20080618

Now we will repeat the process of folding for the general one-scatterer matrix with $N$ open channels and $N_c$ closed channels,
\begin{equation}
\left( \left( \begin{array}{ccc} \hat{\Gamma}_{pp} & | & \hat{\Gamma}_{pq} \\ --- & + & --- \\ \hat{\Gamma}_{qp} & | & \hat{\Gamma}_{qq} \end{array} \right) - 2 i \left( \begin{array}{cccc} k_1 &  &  & 0 \\  & k_2 &  &  \\  &  & \ddots &  \\ 0 &  &  & k_{N+N_c} \end{array} \right) \right) \left( \begin{array}{c} \vec{t}_p \\ \vec{t}_q\end{array} \right) = \text{free terms from input}
\end{equation}
where, if $n>N$, then $k_n=i\kappa_n$.
Do the bottom half (closed channels only) of the matrix multiplication,
\begin{equation}
\hat{\Gamma}_{qp} \vec{t}_p + (\hat{\Gamma}_{qq} + 2 \hat{\kappa}_q)\vec{t}_q = \left( \begin{array}{c} 0 \\ \vdots \\ 0 \end{array} \right)_q
\end{equation}
The right-hand side is zero since the evanescent modes cannot have inputs, and no $-2ik$ terms appear in $\hat{\Gamma}_{qp}$ since the diagonal of the full matrix only contributes within the $qq$ block, where $k=i\kappa$ turns $-2ik$ into $+2\kappa$. Solve for $\vec{t}_q$,
\begin{equation}
(\hat{\Gamma}_{qq}+2 \hat{\kappa}_q) \vec{t}_q = - \hat{\Gamma}_{qp} \vec{t}_p
\end{equation}
\begin{equation}
\vec{t}_q = -(\hat{\Gamma}_{qq} + 2 \hat{\kappa}_q)^{-1} (\hat{\Gamma}_{qp} \vec{t}_p)
\end{equation}
Now it is time for the upper set (open channels),
\begin{equation}
\text{free terms} = (\hat{\Gamma}_{pp}-2 i \hat{k}_p)\vec{t}_p + \hat{\Gamma}_{pq} \vec{t}_q
\end{equation}
Plug in $\vec{t}_q$,
\begin{equation}
\text{free terms} = (\hat{\Gamma}_{pp}-2 i \hat{k}_p)\vec{t}_p - \hat{\Gamma}_{pq} (\hat{\Gamma}_{qq}+2 \hat{\kappa}_q)^{-1}\hat{\Gamma}_{qp} \vec{t}_p
\end{equation}
and factor out $\vec{t}_p$,
\begin{equation}
\text{free terms} = \left((\hat{\Gamma}_{pp}-2 i \hat{k}_p) - \hat{\Gamma}_{pq} (\hat{\Gamma}_{qq}+2 \hat{\kappa}_q)^{-1}\hat{\Gamma}_{qp}\right) \vec{t}_p
\end{equation}
which can be compared to equation B8 in \cite{2007_Froufe-Perez_PRE},
\begin{equation}
\hat{\tilde{U}}_{pp} = \hat{U}_{pp} - \hat{U}_{pq} \frac{1}{\sqrt{2 \kappa_Q}}\frac{1}{I+ \frac{1}{\sqrt{2 \kappa_Q}}\hat{U}_{QQ}\frac{1}{\sqrt{2 \kappa_Q}} } \frac{1}{\sqrt{2 \kappa_Q}}\hat{U}_{QP}
\end{equation}
Combining the factors of $\sqrt{2\kappa_Q}$ into the inverse,
\begin{equation}
\hat{\tilde{U}}_{pp} = \hat{U}_{pp} - \hat{U}_{pq} \frac{1}{2 \kappa_Q I + \hat{U}_{QQ}}\hat{U}_{QP}
\end{equation}
\begin{equation}
\hat{\tilde{U}}_{pp} = \hat{U}_{pp} - \hat{U}_{pq} (2 \kappa_Q + \hat{U}_{QQ})^{-1} \hat{U}_{QP}
\end{equation}
They match!
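As a numerical sanity check of the folding algebra (a sketch using NumPy with an arbitrary random matrix standing in for $\hat{\Gamma}$, not a physical model), the closed channels can be folded one at a time with the recursion relation above and compared against the direct block elimination $\hat{\Gamma}_{pp} - \hat{\Gamma}_{pq}(\hat{\Gamma}_{qq}+2\hat{\kappa}_q)^{-1}\hat{\Gamma}_{qp}$:

\begin{verbatim}
# Sketch: verify that folding closed channels one at a time (recursion relation)
# matches the direct block elimination.  Gamma and kappa here are arbitrary
# random values, not a physical model.
import numpy as np

rng = np.random.default_rng(0)
N, Nc = 2, 3                              # open and closed channels
size = N + Nc
Gamma = rng.normal(size=(size, size))     # arbitrary matrix standing in for Gamma
kappa = rng.uniform(1.0, 2.0, size)       # kappa_n is only used for n > N

def fold_once(G, kap):
    # Eliminate the last channel: G_ij -> G_ij - G_in G_nj / (G_nn + 2 kappa_n)
    n = G.shape[0] - 1
    denom = G[n, n] + 2 * kap[n]
    return (G - np.outer(G[:, n], G[n, :]) / denom)[:n, :n]

G_folded = Gamma.copy()
for _ in range(Nc):                       # fold the closed channels, last one first
    G_folded = fold_once(G_folded, kappa)

Gpp, Gpq = Gamma[:N, :N], Gamma[:N, N:]
Gqp, Gqq = Gamma[N:, :N], Gamma[N:, N:]
G_block = Gpp - Gpq @ np.linalg.inv(Gqq + 2 * np.diag(kappa[N:])) @ Gqp

print(np.allclose(G_folded, G_block))     # True
\end{verbatim}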
\documentclass[output=paper]{langscibook} \ChapterDOI{10.5281/zenodo.5524290} \author{Artemis Alexiadou\affiliation{Humboldt-Universität zu Berlin} and Elena Anagnostopoulou\affiliation{University of Crete}} \title{Greek aspectual verbs and the causative alternation} \abstract{In this paper we examine a particular type of causative construction in Greek, built on the basis of the verb \textit{matheno} ‘learn’ and aspectual verbs like \textit{arhizo} ‘start’. Focusing on the latter and building on \citet{Amberber1996} and \citet{Anagnostopoulou2001}, we will analyze causative constructions involving aspectual verbs as a sub-case of the (anti-)causative alternation. We will further propose to correlate this with the fact that aspectual verbs in Greek have been shown to be ambiguous between control and raising interpretations, following \citet{MourounasWilliamson2019}. Finally, we speculate that the cross-linguistic variation between Greek and English can be attributed to the cross-linguistic availability of the conative alternation.} \begin{document} \maketitle \section{Introduction} In this paper, we investigate a certain type of causative construction in Greek, recently discussed in \citet{AnagnostopoulouSevdali2020}. These are built on the basis of the verb \textit{matheno} `learn' and aspectual verbs like \textit{arhizo} `start', \textit{ksekinao} `start' and \textit{sinehizo} `continue' and are illustrated in \REF{alexiadouex:key:1} and \REF{alexiadouex:key:2}: \ea \label{alexiadouex:key:1} \ea[]{ \label{alexiadouex:key:1a} \gll I Maria emathe dhisko/dhiskovolia.\\ The Mary.\textsc{nom} learned discus.\textsc{acc}\\ \glt `Mary learned discus.' } \ex[]{ \label{alexiadouex:key:1b} \gll O proponitis emathe tis Marias / tin Maria dhisko/dhiskovolia.\\ The trainer.\textsc{nom} learned the Mary.\textsc{gen} {} the Mary.\textsc{acc} discus.\textsc{acc}\\ \glt `The trainer taught Mary discus.' } \z \ex \label{alexiadouex:key:2} \ea[]{ \label{alexiadouex:key:2a} \gll I Maria arhise Aglika.\\ The Mary.\textsc{nom} started English.\textsc{acc}\\ \glt `Mary started (to learn) English.' } \ex[]{ \label{alexiadouex:key:2b} \gll Tha \{ tis / tin \} arhiso \{ tis Marias / tin Maria \} Aglika.\\ \textsc{fut} {} \textsc{cl.gen} {} \textsc{cl.acc} {} start.\textsc{1sg} {} the Mary.\textsc{gen} {} the Mary.\textsc{acc} {} English\\ \glt `I will make Mary start (to learn) English.' } \z \z In this paper, we will focus on \REF{alexiadouex:key:2}, the examples with aspectual verbs. Building on insights in \citet{Amberber1996} and \citet{Anagnostopoulou2001} on ingestive predicates, e.g. \textit{eat} but also \textit{learn}, we will analyze the alternation in \REF{alexiadouex:key:2} as a sub-case of the (anti-)causative alternation, cf. \citet{Levin1993}, and \citet{MourounasWilliamson2019}. This will straightforwardly explain why such examples encode causative semantics. Specifically, we will consider \REF{alexiadouex:key:2a} a dyadic anticausative predicate, and \REF{alexiadouex:key:2b} the causative variant thereof. According to \citet{Amberber1996} and \citet{Anagnostopoulou2001}, the unexpected behavior of ingestive verbs has to do with the fact that the goal argument is interpreted as an Agent, when no external argument is present. We will show that \REF{alexiadouex:key:2} is an ingestive structure and thus subject to the same principle. In \REF{alexiadouex:key:2a} the goal argument is interpreted as an Agent, as there is no external argument present. 
This is not the case in \REF{alexiadouex:key:2b}, where the external argument is present. While \REF{alexiadouex:key:2a} is transitive on the surface, it does not behave like a typical transitive verb, as it cannot undergo passivization. On this view, \REF{alexiadouex:key:2b} is a causative construction in which the subject is the cause of the initial sub-event of a Mary learning English event, and \REF{alexiadouex:key:2a} is its anticausative variant. Following \citet{MourounasWilliamson2019}, we will propose to correlate this with the fact that aspectual verbs in Greek have been shown to be ambiguous between control and raising interpretations \citep{alexiadouanagnostopoulou1999, Roussou2009, AlexiadouAnagnostopoulouIordachioaiaMarchis2010, AlexiadouAnagnostopoulouIordachioaiaMarchis2012, AlexiadouAnagnostopoulouWurmbrand2014}, just as in English. In previous work, we argued that aspectual verbs in Greek form restructuring-type biclausal domains via a Long Distance Agree chain between the matrix and the embedded, semantically null, T with fully specified $\varphi$-features. This forces coindexation between the matrix and the embedded subject and Obligatory Backward or Forward Control and Raising/Long Distance Agreement phenomena. Following \citet{MourounasWilliamson2019}, we will propose that aspectual verbs have a single lexical entry for both subjunctive and nominal complements. \REF{alexiadouex:key:2b} is in fact similar to \citegen{Grano2016} example \textit{John started Bill smoking} and provides evidence against the claim that aspectual verbs do not permit overt subjects in their non-finite complements (cf. \citegen{Grano2016} \textit{overt embedded subjects} generalization). As we take the examples in \REF{alexiadouex:key:2} to involve ingestive predicates, we will conclude that the cross-linguistic variation between Greek and English can be attributed to the cross-linguistic availability of the conative alternation. English allows the counterpart of \REF{alexiadouex:key:2b} if the theme argument is introduced via a PP; Greek does not have a systematic conative alternation and, therefore, it does not require a PP in constructions comparable to that in \REF{alexiadouex:key:2b}. \section{The anticausative alternation with Greek aspectual verbs} As is well known, in English and in Greek verbs like \textit{break} or \textit{open} undergo the causative alternation: \ea%3 \label{alexiadouex:key:3} \ea John broke the window. \ex The window broke. \z \ex%4 \label{alexiadouex:key:4} \ea \gll O Janis anikse to parathiro.\\ The John opened.\textsc{3sg} the window.\textsc{acc}\\ \glt ‘John opened the window.’ \ex \gll To parathiro anikse.\\ The window.\textsc{nom} opened.\textsc{3sg}\\ \glt ‘The window opened.’ \z \z One diagnostic to distinguish anticausatives from passives discussed at length in \citet{AlexiadouAnagnostopoulouSchafer2015}, building on \citet{LevinRappaportHovav1995}, is the availability of the \textit{by-itself} modifier. While anticausatives allow the \textit{by-itself} phrase, passives disallow it. \citet{AlexiadouAnagnostopoulouSchafer2015} argue that this relates to the \textit{no particular cause} interpretation associated with the \textit{by-itself} phrase in English and its counterparts across languages. This is incompatible with the interpretation of the passive, which implies the presence of an external argument. 
By contrast, English passives, but not anticausatives, allow agentive \textit{by}-phrases: \ea%5 \label{alexiadouex:key:5} \ea The window was broken *by itself/by John. \ex The window broke by itself/*by John. \z \z \citet{MourounasWilliamson2019} argue that aspectual verbs undergo the causative alternation in English, as they do not tolerate agentive \textit{by} phrases as opposed to the passive variant, see \REF{alexiadouex:key:6}: \ea%6 \label{alexiadouex:key:6} \ea The official began the London marathon. \ex The London marathon began. \ex The London marathon was begun by the official. \ex The London marathon began (*by the official). \z \z Greek aspectual verbs behave similarly. They form actively marked anticausatives and can be modified by \textit{by-itself}. While \textit{begin} does not have a non-actively marked passive variant, the non-actively marked variant of \textit{stop} is marginally acceptable and is interpreted as passive \REF{alexiadouex:key:7c}, similarly to non-actively marked intransitive variants of Greek de-adjectival verbs.% \footnote{As \citet{AlexiadouAnagnostopoulouSchafer2015} and references therein discuss at length, Greek also has several anticausatives which bear Non-Active morphology. In the case of de-adjectival verbs, the authors point out that the anticausative bears active morphology and the further intransitive variant, which bears Non-Active, is interpreted solely as a passive.} \ea%7 \judgewidth{\%} \label{alexiadouex:key:7} \ea[]{ \gll O astinomikos stamatise tin kikloforia.\\ The policeman stopped the traffic\\ } \ex[]{ \gll I kikloforia stamatise apo {moni tis}.\\ The traffic stopped by itself\\ } \ex[\%]{\label{alexiadouex:key:7c} \gll I kikloforia stamatithike apo tus astinomikus.\\ The traffic {was stopped} by the policemen\\ } \z \z We argue that the examples in \REF{alexiadouex:key:2}, repeated below, are a further instantiation of the causative alternation, the difference being that \REF{alexiadouex:key:2a} is a dyadic anticausative.% \footnote{An anonymous reviewer asks if all aspectual verbs behave alike. In our judgement, they do, but they differ with respect to the realization of the theme argument. With \textit{stamatao} ‘stop’, \textit{sinexizo} `continue’, the theme argument must be a DP, and it can't be a bare NP, unlike the complement of \textit{arhizo} `start' in \REF{alexiadouex:key:8}. This is an interesting difference which relates to the fact that there is a presupposition associated with these verbs that a particular event has started. Entities that are known both to the speaker and the hearer are DPs in Greek, see also Footnote~\ref{alexiadouftn:key:6}. With \textit{teliono} ‘finish’, the theme is introduced via the preposition \textit{me} ‘with’. } \ea \label{alexiadouex:key:8} \ea \gll I Maria arhise Aglika. \\ The Mary.\textsc{nom} started English.\textsc{acc} \\ \glt ‘Mary started (to learn) English.’ \ex \gll Tha \{ tis / tin \} arhiso \{ tis Marias / tin Maria \} Aglika.\\ \textsc{fut} {} \textsc{cl.gen} {} \textsc{cl.acc} {} start.\textsc{1sg} {} the Mary.\textsc{gen} {} the Mary-\textsc{acc} {} English\\ \glt `I will make Mary start (to learn) English.' 
\z \z Support for this comes from the observation that \REF{alexiadouex:key:2a} resists passivization: \ea[*]{ \gll Ta Aglika arhistikan apo ti Maria.\\ The English.\textsc{nom} started.\textsc{nact} by the Mary.\textsc{acc} \\ \glt ‘English was started by Mary.’ } \z Building on \citet{Anagnostopoulou2001}, in \REF{alexiadouex:key:2b}, the DP argument is interpreted as a goal as there is a higher agent present. A characteristic property of \REF{alexiadouex:key:2b} is that the embedded verb is necessarily interpreted as ‘learn’, which describes acquisition of information that may be viewed as a type of ingestion. The existence of examples where the embedded verb can also be ‘eat’ or ‘drink’ in \REF{alexiadouex:key:9} supports the claim that these constructions belong to the broader class of ingestives \citep[213--217]{Levin1993}, construed as `taking something into the body or mind (literally or figuratively)' \citep[46]{Masica1976}: \ea%9 \label{alexiadouex:key:9} \ea \gll Tha \{ tis / tin \} arxiso \{ tis Marias / tin Maria \} fruta.\\ \textsc{fut} {} \textsc{cl.gen} {} \textsc{cl.acc} {} start.\textsc{1sg} {} the Mary.\textsc{gen} {} the Mary.\textsc{acc} {} fruit\\ \glt ‘I will make Mary start (to eat) fruit.’ \ex \gll Tha \{ tis / tin \} arxiso \{ tis Marias / tin Maria \} gala.\\ \textsc{fut} {} \textsc{cl.gen} {} \textsc{cl.acc} {} start.\textsc{1sg} {} the Mary.\textsc{gen} {} the Mary.\textsc{acc} {} milk\\ \glt ‘I will make Mary start (to drink) milk.’ \z \z Ingestive verbs are known in the literature to display exceptional behavior across languages, a fact which has been related to the observation that the person that consumes e.g. food, liquids (as in \textit{eat} or \textit{drink}) or knowledge (as in \textit{learn}, \textit{study}) not only controls but is also affected by the consumption event. Cross-linguistic evidence suggests that languages treat ingestive verbs differently from ordinary transitive verbs (see \citealt{Jerro2019} for a recent summary, cf. \citealt{Amberber1996,Jackendoff1990}). In e.g. Amharic these verbs pattern with unaccusatives rather than with transitives with respect to causativization \citep{Amberber1996}. This in turn can be related to the fact that in the presence of an external argument the DP is interpreted as a goal, while in the absence of an external argument, the DP is interpreted as an agent, as suggested in \citet{Anagnostopoulou2001} for \textit{learn}.% \footnote{Different implementations of this have been put forth in the literature. \citet{Anagnostopoulou2001} argues that the interpretation of the DP as an agent or a goal depends on the presence of an external argument. \citet{Amberber1996} proposes that in the anticausative structure the Agent and the Goal role are coindexed. \citet{Krejci2012} claims that ingestive verbs are inherent reflexives, an analysis adopted in \citet{Jerro2019}. 
He argues that the subject of \textit{eat} is associated with various entailments that are split across two arguments in \textit{feed}.} Because of this, \REF{alexiadouex:key:2a} is in principle compatible with agentive adverbials, a fact that we attribute to the particular interpretation associated with ingestive structures, despite the fact that this argument is not introduced by Voice, the head canonically introducing agents.% \footnote{Many thanks to an anonymous reviewer for pointing this out to us.} With respect to the case patterns exhibited in \REF{alexiadouex:key:2b}, \citet{AnagnostopoulouSevdali2020} extensively argue that the optionality in the case of the causee argument is only apparent. When the lower direct object is definite, as in examples \REF{alexiadouex:key:10}, only the genitive causee is licit; the accusative one is ungrammatical. \ea \label{alexiadouex:key:10} \ea[]{ \gll Pjos \{ \textsuperscript{ok}tis / *tin \} emathe \{ \textsuperscript{ok}tis Marias / *tin Maria \} ta Aglika?\\ Who {} \textsuperscript{ok}\textsc{cl.gen} {} *\textsc{cl.acc} {} learned {} \textsuperscript{ok}the Mary.\textsc{gen} {} *the Mary.\textsc{acc} {} the English?\\ \glt ‘Who taught Mary the English language?’ } \ex[]{ \gll Tha \{ \textsuperscript{ok}tis / *tin \} arhiso \{ \textsuperscript{ok}tis Marias / *tin Maria \} ta Aglika.\\ \textsc{fut} {} \textsuperscript{ok}\textsc{cl.gen} {} *\textsc{cl.acc} {} start.\textsc{1sg} {} \textsuperscript{ok}the Mary.\textsc{gen} {} *the Mary.\textsc{acc} {} the English\\ \glt ‘I will make Mary start (to learn) English.’ } \z \z The case of the causee argument is thus sensitive to the realization of the lower object: when this object is a definite DP, the causee must be genitive. It is only in the presence of a lower bare NP, as in \REF{alexiadouex:key:1} and \REF{alexiadouex:key:2} that both cases are possible.% \footnote{An anonymous reviewer asks if it is the DP vs. NP distinction that is crucial here or the definite/non-definite distinction, as one could think of English as definite (proper name like) even in the absence of a determiner. In Greek, unlike in English, proper names necessarily appear with a determiner. \citet{AlexopoulouFolli2011,AlexopoulouFolli2019} have argued that Greek definite determiners are not expletive when they appear with proper names, but rather have a semantic effect. It brings about an interpretation, according in which the noun is known both to the speaker and the hearer. The same reviewer asks if the anticausative of \REF{alexiadouex:key:11} is possible in Greek, which it is.\label{alexiadouftn:key:6}} \citet{AnagnostopoulouSevdali2020} argue at length that the above described case distribution can be naturally accounted for if genitive case in Greek is dependent case upward which is assigned in the vP domain in opposition to a lower DP while accusative case is dependent case downward assigned in the TP domain in opposition to a higher DP. When the lower object is a bare NP it only optionally counts as a case competitor for the assignment of dependent genitive. Genitive is assigned when the lower NP counts as a case competitor and accusative (dependent case in opposition to the external argument) is assigned when it doesn’t. This conclusion is reinforced by the observation that when the lower argument is a PP, which does not count as a case competitor, the higher one must bear accusative case and cannot have dependent genitive, as shown in \REF{alexiadouex:key:11}. 
\ea%11
\label{alexiadouex:key:11}
\gll Pjos \{ *tis / \textsuperscript{ok}tin \} emath-e \{ *tis Maria-s / \textsuperscript{ok}tin Maria \} s-ta narkotika?\\
Who {} *\textsc{cl.gen} {} \textsuperscript{ok}\textsc{cl.acc} {} learn-\textsc{pst.3sg} {} *the Maria-\textsc{gen} {} \textsuperscript{ok}the Maria.\textsc{acc} {} to-the drugs.\textsc{acc}? \\
\glt ‘Who got Maria addicted to drugs?’
\z

The final point that we would like to make with respect to aspectual verbs is that they can also take subjunctive complements and in this case they have been argued to be ambiguous between control and raising interpretations, see \citet{alexiadouanagnostopoulou1999} and \citet{Roussou2009}. Unlike English, Greek lacks infinitival complements: sentences that correspond to infinitivals in English are introduced by the subjunctive particle \textit{na}. Agent-oriented adverbs are possible with aspectual verbs and they necessarily have matrix scope, as shown in \REF{alexiadouex:key:12}. Moreover, they form imperatives, as shown in \REF{alexiadouex:key:13}:

\ea%12
\label{alexiadouex:key:12}
\ea[]{
\gll Epitidhes arhisa na magirevo stis 5.00.\\
{on purpose} started.\textsc{1sg} \textsc{subj} cook.\textsc{1sg} at 5.00\\
\glt `I started on purpose to cook at 5:00.'
}
\ex[]{
\gll Epitidhes stamatisa na perno ta farmaka.\\
{on purpose} stopped.\textsc{1sg} \textsc{subj} take.\textsc{1sg} the medicine\\
\glt `I stopped on purpose to take the medication.'}
\z

\ex%13
\label{alexiadouex:key:13}
\ea[]{
\gll Arhise na diavazis!\\
Start.\textsc{2sg} \textsc{subj} read.\textsc{2sg}\\
\glt ‘Start reading!’
}
\ex[]{
\gll Stamata na kapnizis!\\
Stop.\textsc{2sg} \textsc{subj} smoke.\textsc{2sg}!\\
\glt ‘Stop smoking!’
}
\z
\z

On the basis of idiomatic expressions, \citet{alexiadouanagnostopoulou1999} show that aspectual verbs can be raising verbs. In Greek, fixed nominatives as part of idiomatic expressions occur in postverbal position.

\ea%14
\label{alexiadouex:key:14}
\ea[]{
\label{alexiadouex:key:14a}
\gll Mu bikan psili st'aftia.\\
\textsc{cl.1sg.gen} entered.\textsc{3pl} fleas.\textsc{nom} {in the ears}\\
\glt ‘I became suspicious.’
}
\ex[*]{
Psili mu bikan st'aftia.
}
\z
\z

Examples like \REF{alexiadouex:key:14a} can be embedded under \textit{arhizo} and \textit{stamatao}. The subject in the embedded clause agrees with the embedded and the matrix verb:

\ea%15
\label{alexiadouex:key:15}
\gll Stamatisan / arhisan na mu benun psili st'aftia.\\
Stopped.\textsc{3pl} {} started.\textsc{3pl} \textsc{subj} \textsc{cl.1sg.gen} enter.\textsc{3pl} fleas.\textsc{nom.pl} {in the ears}\\
\glt ‘I stopped being/started becoming suspicious.’
\z

In \REF{alexiadouex:key:15} the nominative depends on the lower verb for its interpretation and yet it agrees with both verbs obligatorily. Lack of agreement leads to ungrammaticality, as shown in \REF{alexiadouex:key:16}:

\ea%16
\label{alexiadouex:key:16}
\gll *Stamatise / arhise na mu benun psili st'aftia.\\
Stopped.\textsc{3sg} {} started.\textsc{3sg} \textsc{subj} \textsc{cl.1sg.gen} enter.\textsc{3pl} fleas.\textsc{nom} {in the ears} \\
\glt ‘I stopped being/started becoming suspicious.’
\z

\citet{alexiadouanagnostopoulou1999} point out that the fact that agreement between the subject and the matrix verb is obligatory is an argument that these constructions display Agree without movement.
They conclude that aspectual verbs are ambiguous between a control and a raising interpretation, see also \citet{Roussou2009}.% \footnote{ An anonymous reviewer points out that the behavior of \textit{arhizo} that we describe here is reminiscent of other embedding verbs that have been argued to alternate between a causative and a non-causative meaning, depending on whether the embedded verb is controlled or not, e.g. \textit{prospatho} ‘try’. An attempt to relate the behavior of \textit{prospatho} to our alternation here would bring us too far afield. } \section{Towards an analysis} Following \citet{MourounasWilliamson2019}, we propose that there is a single lexical entry associated with both subjunctive and nominal complements of aspectual verbs. Adopting the analysis proposed in \citet{AlexiadouAnagnostopoulouSchafer2015}, we assign the structures in \REF{alexiadouex:key:17} to anticausative and causative variants of aspectual verbs in Greek. Greek sentences like \REF{alexiadouex:key:2a} have an anticausative analysis, \REF{alexiadouex:key:17a}. The subject DP originates in the ResultP, which can be seen as a small clause consisting of the subject and a DP which has a coerced event interpretation (‘English’ understood as ‘learn English’). The subject of the small clause undergoes ‘raising’ entering Agree with T. On the other hand, \REF{alexiadouex:key:17b} is the causative counterpart which projects a Voice above the v+Root combination introducing an external argument. The subject DP in \REF{alexiadouex:key:17b} enters Agree with T and ‘Mary’ receives either dependent genitive or dependent accusative depending on the nature of the lower DP (NP or DP or PP). \ea%17 \label{alexiadouex:key:17} \ea \label{alexiadouex:key:17a} \textit{anticausative} \textit{begin}: Greek \textit{Mary started English} (comparable to ‘Mary started the journey’, ‘Mary started smoking’ in English) \\ \begin{forest} [vP [\phantom{xxxx}] [v' [v-Root] [Result [{Mary English},roof ] ] ] ] \end{forest} \ex \label{alexiadouex:key:17b} \textit{causative} \textit{begin}: Greek \textit{I started Mary English} (comparable to `I started John smoking' in English) \\ \begin{forest} [VoiceP [DP] [vP [v-Root] [Result [{Mary English}, roof ] ] ] ] \end{forest} \z \z Building on \citet{MourounasWilliamson2019}, we correlate the anticausative structure of aspectual verbs with the raising interpretation, while the causative structure with the control interpretation, as in \REF{alexiadouex:key:18}: \ea%18 \label{alexiadouex:key:18} \ea \label{alexiadouex:key:18a} \textit{anticausative} \textit{begin}, \textit{TP} \textit{compl.} \textit{raising} \\ \ob T$\varphi_k$ \ob\textsubscript{vP} \ob\textsubscript{RootP} start/ stop \ob\textsubscript{MoodP} na \ob\textsubscript{TP} T$\varphi_k$ DP$\varphi_k$ \cb \cb \cb \cb \cb \ex \label{alexiadouex:key:18b} \textit{causative} \textit{begin}, \textit{TP} \textit{compl,} \textit{control} \\ \ob T \ob\textsubscript{VoiceP} DP \ob\textsubscript{vP} \ob\textsubscript{RootP} start/ stop \ob\textsubscript{MoodP} na \ob\textsubscript{TP} \ob\textsubscript{VoiceP} PRO \cb \cb \cb \cb \cb \cb \cb \z \z In \REF{alexiadouex:key:18a}, the raising structure, no Voice is projected above matrix VP (the Root + v combination) and the embedded subject undergoes Raising or enters Long Distance Agreement with the matrix T. On the other hand, Voice is present above the matrix DP introducing a matrix subject which enters an obligatory control relation with a null PRO embedded subject. 
\citet{MourounasWilliamson2019}, building on \citet{wurmbrand2001,Wurmbrand2002,Wurmbrand2014}, assume that in languages with infinitives like English, complements of aspectual verbs are vPs which lack a TP component. This is not the case in Greek, which provides evidence for the presence of a semantically empty T head and a Mood head occupied by the subjunctive particle \textit{na}, see \citet{AlexiadouAnagnostopoulouToapp}. In the above-sketched system, the control analysis of aspectuals is captured by the presence of VoiceP in the matrix clause. By contrast, the raising analysis is captured by the fact that these verbs undergo the causative alternation and their intransitive variants lack Voice. This naturally provides an explanation for the causative interpretation associated with aspectual verbs observed in \REF{alexiadouex:key:2b} and for the alternation between \REF{alexiadouex:key:2a} and \REF{alexiadouex:key:2b}, which originates in the presence of an external argument in the causative construction \REF{alexiadouex:key:2b} and its absence in the (anti-)causative \REF{alexiadouex:key:2a}.

Before closing this squib, we briefly address two questions. First, why is it that aspectuals in Greek may license ECM with small clauses of the type illustrated in \REF{alexiadouex:key:2b} but not with full clausal complements \REF{alexiadouex:key:19b}, and why is it that \REF{alexiadouex:key:19a} is grammatical but \REF{alexiadouex:key:19b} is not?

\ea%19
\label{alexiadouex:key:19}
\ea[]{
\label{alexiadouex:key:19a}
\gll I Maria arhise na matheni Aglika.\\
The Mary.\textsc{nom} started.\textsc{3sg} \textsc{subj} learn.\textsc{3sg} English\\
\glt ‘Mary started to learn English.’
}
\ex[*]{
\label{alexiadouex:key:19b}
\gll Arhisa tin Maria na matheni Aglika.\\
started.\textsc{1sg} the Mary.\textsc{acc} \textsc{subj} learn.\textsc{3sg} English\\
}
\z
\z

Second, what explains the fact that constructions like \REF{alexiadouex:key:2a} and \REF{alexiadouex:key:2b} are possible in Greek but not in English?

With respect to the first question, we will follow \citet{Grano2016} and \citet{MourounasWilliamson2019}, who propose that the semantics of subject-introducing infinitives are interpretably incompatible with the lexical semantics of aspectual verbs. ECM infinitives (whether they are CPs introduced by ‘for’ or TPs) necessarily encode modality \citep{kratzer2006,Moulton2009,Grano2016}, and they are uninterpretable when combined with non-modal eventualities such as those introduced by aspectual verbs. As a result of this, only non-modal properties of eventualities may serve as interpretable restrictors of the event variable introduced by aspectual verbs. We will adopt this analysis and will assume that it also applies to ECM subjunctives. In the Greek small clause constructions under discussion of the type seen in \REF{alexiadouex:key:2b}, as well as in examples like ‘I started John smoking’ in English, there is no modal operator blocking embedding under aspectuals, and the relevant constructions are licit. Similarly, raising infinitives as in \REF{alexiadouex:key:19a} do not encode modality.

With respect to the second question, we note that even in English it is possible to construct \REF{alexiadouex:key:2}; however, in the transitive variant the DP argument is introduced by \textit{on}, see \citet{Levin1993}:%
\footnote{
We are grateful to an anonymous reviewer for bringing these examples to our attention.
} \ea%20 \label{alexiadouex:key:20} \ea \label{alexiadouex:key:20a} Mary started English in the third grade. \ex \label{alexiadouex:key:20b} John started Mary on English. \z \z We tentatively propose that \textit{on} is required to license an aspectual interpretation signaling continuation and that this should be linked to the conative alternation in English which, according to \citet[42]{Levin1993} “expresses an “attempted” action without specifying this action was actually carried out”. Usually the PP employed in the intransitive conative variant is headed by \textit{at} but, interestingly, sometimes \textit{on} surfaces with certain verbs of ingesting, as pointed out by \citet{Levin1993}: \ea%21 \label{alexiadouex:key:21} \ea The mouse nibbled the cheese. \ex The mouse nibbled at/on the cheese. \z \z We would like to speculate that the \textit{on} seen in \REF{alexiadouex:key:20b} is a trace of the conative construction. Greek does not have a systematic conative alternation and, therefore, it does not require a PP in constructions comparable to \REF{alexiadouex:key:20b}. The issue awaits further research. \section*{Acknowledgments} We are indebted to two anonymous reviewers for their insightful comments. Many thanks to Susi for friendship and inspiration through the years. AL 554/8-1 (Alexiadou) and a Friedrich Wilhelm Bessel Research Award 2013 and HFRI-F17-44 (Anagnostopoulou) are hereby acknowledged. {\sloppy\printbibliography[heading=subbibliography,notkeyword=this]} \end{document}
\documentclass[oneside,a4paper]{article}

% ========== Preamble (packages, definitions etc.) ==========
\usepackage[utf8]{inputenc}
\usepackage{graphicx}
\usepackage{xcolor}
\usepackage{amsmath, amsthm, amssymb}
\usepackage{csquotes}
\usepackage{hyperref}
\usepackage{listings}
\usepackage{lmodern}
\usepackage{float}
\usepackage{braket}

\setlength{\parskip}{\baselineskip}

%\newcounter{questionnum} \setcounter{questionnum}{0}
%\newcommand{\question}[1]{%
%	\refstepcounter{questionnum}%
%	\paragraph{Question~\arabic{questionnum}:}{\emph{#1}}}

\newcommand\filltoend{\leavevmode{\unskip
	\leaders\hrule height.5ex depth\dimexpr-.5ex+0.4pt\hfill\hbox{}%
	\parfillskip=0pt\endgraf}}

\newcommand{\problem}[2]{%
	\vspace{-0.7em}
	\hspace{0.02\textwidth}
	\begin{minipage}[t][][b]{0.95\textwidth}
		{\bf \hspace{-0.015\textwidth}\makebox[7.5em][l]{{#1} ~~\filltoend}}%
		\hspace{1.2mm}{\it #2}%
	\end{minipage}
}

\lstset{ % Set the default style for code listings
	numbers=left,
	numberstyle=\scriptsize,
	numbersep=8pt,
	basicstyle=\scriptsize\ttfamily,
	keywordstyle=\color{blue},
	stringstyle=\color{red},
	commentstyle=\color{green!70!black},
	breaklines=true,
	frame=single,
	language=C++,
	captionpos=b,
	tabsize=4,
	showstringspaces=false
}

\graphicspath{ {res/} }

% ========== Title page ==========
\title
{
	\includegraphics[width=0.6\textwidth]{UU_logo.pdf}\\[1em]
	Cryptology \\ Report\\[1em]
	%\\[3em]
	Quantum cryptography: Public key distribution and coin tossing
}
\author{
	Hendrik Bierlee
	\and
	Nodari Kankava
}

\begin{document}

\maketitle
\thispagestyle{empty} % Removes page number for front page
\pagebreak

\newcounter{qcounter}
\setcounter{qcounter}{1}
\newcommand{\question}[1]{\par\vspace{10px}\noindent\textbf{Question \theqcounter \stepcounter{qcounter}:} \emph{#1}\vspace{0.5em}\\\noindent}

% ========== Document contents ==========
\section{Introduction}

\subsection{Problem statement of key-distribution}
\label{sec:key-dist-problem}
Key distribution is one of the main issues in cryptography. In a scenario where two parties want to establish a secure communication channel, they first need to agree on a shared secret key. This is an issue for symmetric cryptography protocols, where cryptographic keys have to be securely shared between sender and receiver. Easy key distribution becomes even more important when frequent key changes are required or when the number of participants in the communication increases.

\subsection{Classical solution to the public key distribution}
Public key cryptography allows secure distribution of secret keys between parties who don't share initial secret information. The Diffie–Hellman key exchange protocol~\cite{diffie1976new} allows two parties to agree on a shared secret key. This protocol is frequently used in systems that also achieve forward secrecy because of its fast key generation. The Diffie–Hellman key exchange is considered secure as long as no efficient algorithm for computing discrete logarithms is found.

Classic information theory assumes that communication can be eavesdropped either passively or actively. There is no protection against copying transmitted information. Communicating parties can verify with a high degree of confidence that an exchanged message was not modified, but they should operate under the assumption that the information was most probably copied. Another major advantage of public key cryptography is message verification and non-repudiation.
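To make this classical baseline concrete, the listing below sketches a toy Diffie–Hellman exchange in Python (the prime and base are arbitrary, deliberately small choices for illustration only; a real deployment would use a large safe prime or an elliptic-curve group):

\begin{lstlisting}[language=Python]
# Toy Diffie-Hellman exchange (illustration only: the prime is far too small
# for real use; a production system would use a large safe prime or an
# elliptic-curve group).
import secrets

p = 4294967291          # small public prime (2^32 - 5), insecure on purpose
g = 5                   # public base

a = secrets.randbelow(p - 2) + 2     # Alice's secret exponent
b = secrets.randbelow(p - 2) + 2     # Bob's secret exponent

A = pow(g, a, p)        # Alice sends A to Bob over the open channel
B = pow(g, b, p)        # Bob sends B to Alice over the open channel

# Both sides derive the same shared secret without ever transmitting it.
assert pow(B, a, p) == pow(A, b, p)
\end{lstlisting}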
\subsection{Problem statement of the remote coin-flip problem}
The `coin-flip by telephone' problem was first described in 1981~\cite{blum1981coin}: two distrusting parties, Alice and Bob, want to play a remote game without a third party where both have an equal win-chance, and where neither can cheat.

\subsubsection{Classical solution to the remote coin-flip problem}
In the same paper, the problem was solved by constructing a one-way function $f$ which is two-to-one, meaning that one can have a pair of inputs $x$ and $y$ that map to the same value $f(x)=f(y)$. An example of a two-to-one function (though not of a one-way one) is $f(x) =|1/x|$, because every allowed input $x$ has a counterpart $-x$ that $f$ maps to the same value (except for $0$, for which $f$ is undefined). Furthermore, $x$ and $y$ have some distinguishing property, for instance, $x$ is always even and $y$ is always odd.

Because of the one-way nature of $f$, Alice can randomly select $x$ but it is computationally impossible for her to also find out the corresponding $y$. After selecting $x$, Alice sends $f(x)=c_{\text{Alice}}$ to Bob (her \textit{commitment} to $x$), and from $f(x)$ Bob cannot know whether Alice used $x$ or $y$.
\footnote{
	This idea is reminiscent of a zero-knowledge proof~\cite{goldwasser1989knowledge}, where one party proves they possess certain information without revealing to the other side what that information is. Alice proves to Bob she committed to one of two choices without revealing her actual choice.
}
So, Bob can proclaim a guess to Alice with a 50\% chance of getting it right. Subsequently, Alice can claim victory or defeat and prove her original choice of $x$ by sending $x$ to Bob. Bob can verify the claim by recognising that $x$ is in fact even, and by computing $f(x)=c_{\text{Alice}}$. Alice does not possess $y$, so she cannot possibly cheat by sending Bob an odd value that $f$ maps to $c_{\text{Alice}}$.

A benefit to this problem is that if the messages between Alice and Bob are signed, any attempt at cheating on the part of Alice (say she sends $x'$ such that $f(x') \neq c_{\text{Alice}}$) can be proved to a judge.

\subsection{Need for new solutions}
Many cryptography methods are founded on the assumption that some procedures are computationally hard (an assumption related to the conjecture that $P \neq NP$). A prime example of this is prime factorisation, or computing discrete logarithms. However, this assumption may yet be invalidated by breakthroughs in algorithm design, or, more on topic, the introduction of quantum computing that can execute algorithms such as Shor's algorithm, which can do prime factorisation in polynomial time~\cite{bernstein2017post}. In the next section, we will summarise the quantum methods for solving the problems, which rely on fundamental properties of physics for security.

\section{Problem solution}
Before presenting the solutions for public-key exchange and remote coin-flipping, we briefly introduce some of the fundamental properties of photons that will be used in both solutions. In our following discussion, we assume \emph{good} but not \emph{perfect} quantum hardware (when it comes to the ability to pick out, send, receive and measure individual photons). Perfect hardware would enable cheating the coin-flip with the EPR effect, as discussed in section~\ref{sec:cheating-epr}.

\subsection{The behaviour of polarised photons}
Light consists of individual photon particles, which can be polarised at any angle using polarising filters.
Four of these polarisation axes are important to us: two rectilinear (0 degrees, 90 degrees) and two diagonal (45 degrees, 135 degrees).

To measure the orientation of a polarised photon, one can again use polarising filters. If the orientation (or basis) of the filter matches the orientation of the photon, the photon will pass through, and if the filter is orthogonal to the photon, the photon will not pass through. In other words, rectilinear photons can be measured accurately 100\% of the time (deterministically) if one uses the `correct', rectilinear filter, since the difference in angle between the photon and a filter at 0 degrees is always either 0 or 90 degrees.

But suppose the difference is 45 degrees, as is always the case when you try to measure rectilinear photons with a `wrong', diagonal filter. It happens that the probability of photons passing through this filter is exactly 50\%, so the measurement will tell you exactly nothing about the original polarisation. Photons that did pass through now have the same polarisation as the filter, so re-measuring them tells you nothing about their original orientation.

Furthermore, we cannot simply clone photons and perform multiple measurements. Essentially, you only get one chance to measure a photon, and even though there is theoretically infinite information in a photon (if the polarisation is on a continuous spectrum), measurement will only yield one bit of information.

\subsection{Quantum key distribution}
In the quantum key distribution protocols, a quantum channel is used to exchange random bits between two parties who have not initially shared any secret information; they then continue the conversation using a classic communication channel. As soon as both parties share random bits exchanged over the quantum channel, they can verify that the quantum key exchange was not disturbed and agree to use the shared secret bits to encrypt the classic communication channel.

Unlike digital communications that can be monitored and copied, transmissions inside a quantum communication channel cannot be copied or observed without randomly and uncontrollably changing the state; therefore any disturbance will be evident. If the communicating parties detect that their exchange was disturbed, they will try again to securely share enough random bits using the quantum channel to have a guarantee with high probability that their communication is secure.

The protocol for a quantum key exchange is as follows:

Using a quantum channel:
\begin{enumerate}
	\item Alice chooses a random bit-string, encodes each bit as a polarised photon, switching randomly between rectilinear and diagonal bases, and sends the polarised photons to Bob.
	\item Bob, independently of Alice, randomly chooses which basis to use to measure each photon (rectilinear or diagonal polarisation).
\end{enumerate}

Using a classic channel (with the assumption that it could be monitored, but that messages cannot be changed or altered):
\begin{enumerate}
	\setcounter{enumi}{2}
	\item Bob reveals for each photon which basis he used to measure it (rectilinear or diagonal).
	\item Alice reveals which bases were correct.
	\item Alice and Bob use the bits that were measured in the correct bases to generate a key for use in the classic communication channel.
\end{enumerate}

Because photons have to be measured using a random choice of rectilinear or diagonal basis, any measurement by an eavesdropper will alter the message Bob gets and subsequently produce disagreement between Alice and Bob on bits that they should agree on.
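The statistics behind this can be mimicked classically. The listing below is a plain Python sketch we add purely for illustration (the scheme described above is essentially the well-known BB84 protocol; the `+'/`x' basis labels are our own shorthand): only positions where Bob happened to pick Alice's basis contribute to the key, and an intercept-and-resend eavesdropper causes measurable disagreement on those positions.

\begin{lstlisting}[language=Python]
# Classical simulation of the key-exchange statistics described above
# (illustration only; '+' = rectilinear basis, 'x' = diagonal basis).
import random

def measure(bit, photon_basis, filter_basis):
    # Correct basis returns the encoded bit; the wrong basis gives a coin flip.
    return bit if photon_basis == filter_basis else random.randint(0, 1)

n = 1000
alice_bits = [random.randint(0, 1) for _ in range(n)]
alice_bases = [random.choice("+x") for _ in range(n)]
photons = list(zip(alice_bits, alice_bases))

eavesdrop = True
if eavesdrop:
    # Eve measures each photon in a random basis and re-sends it in that basis.
    eve_bases = [random.choice("+x") for _ in range(n)]
    photons = [(measure(b, pb, eb), eb) for (b, pb), eb in zip(photons, eve_bases)]

bob_bases = [random.choice("+x") for _ in range(n)]
bob_bits = [measure(b, pb, bb) for (b, pb), bb in zip(photons, bob_bases)]

# Sifting: keep only the positions where Bob happened to use Alice's basis.
kept = [i for i in range(n) if bob_bases[i] == alice_bases[i]]
errors = sum(alice_bits[i] != bob_bits[i] for i in kept)
print(len(kept), errors)   # with Eve listening, roughly 25% of kept bits disagree
\end{lstlisting}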
\subsection{The quantum remote coin-flip protocol}
The protocol for a quantum remote coin-flip is as follows:

\begin{enumerate}
	\item Alice chooses at random a secret basis (rectilinear or diagonal) and encodes a secret, random bit-string of decent length as polarised photons using her chosen basis. She sends over the photons to Bob on a quantum channel.
	\item To win, Bob has to guess Alice's choice of basis. But when Bob receives the photons, he does not know how to measure them deterministically to find out their polarisation. The best he can do is switch bases randomly and independently for each photon, so that he at least ends up with a half-filled rectilinear measurement table and a half-filled diagonal measurement table, one of which is 100\% correct and the other is 50\% correct. With nothing to go on, Bob has to make a random guess to Alice as to the original basis.
	\item Alice reveals her basis, and sends over the original bit-string on a classical channel.
	\item Bob can verify Alice's choice, because he can confirm that the elements of the bit-string match up 100\% with the corresponding measurement table.
\end{enumerate}

Like in the classical solution, the key is that one party has to send a commitment to their choice to the other party, without revealing the choice itself.

\section{Benefits and drawbacks of the solutions}
Here we discuss the benefits and drawbacks of the solutions.

\subsection{Advantages of the quantum remote coin-flip protocol}
\subsubsection{Impossibility of cheating}
The protocol is protected against cheating by the laws of physics, instead of by computational hardness:
\begin{itemize}
	\item Given the polarised photons, Bob cannot guess the basis of the polarisation with a probability greater than $1/2$.
	\item Alice has to provide the measurement results of the table with the basis she supposedly picked: if she lies about the basis (because Bob guessed it correctly), she would have to guess the contents of the other table. If $n$ photons were sent, roughly $n/2$ of them were measured by Bob in the other basis, so the probability that Alice can perfectly match that table is about $\left(\frac{1}{2}\right)^{n/2}$. Getting even one measurement wrong would expose Alice as a cheater.
	\item Alice could send a mix of rectilinear and diagonal photons, but then she would not be able to verify either table.
\end{itemize}

\subsection{Common disadvantages and possible attacks for quantum key distribution and coin-tossing}
As mentioned in section~\ref{sec:key-dist-problem}, classic information theory assumes that digital communications can be passively monitored. Quantum information theory guarantees that communication cannot be passively monitored and forces the adversary to be active and detectable. However, there are possible drawbacks.

\subsubsection{No non-repudiation}
One of the issues with quantum communication is that there is no way of verifying who sent the individual photons. Digital signatures cannot be created with quantum cryptography. There is no such thing as signing photons.

Related to this issue is that an eavesdropper can try to suppress communications by introducing noise into a channel or just by trying to measure photons. This is a threat to availability and integrity, but not to confidentiality. Also, in the case of the quantum coin-flip, Alice or Bob could deny that the coin-flip happened (and the other would not be able to prove to a judge that it did happen).

\subsubsection{Generating random bit-strings}
The bit-string that Alice encodes needs to be generated by a cryptographically secure pseudo-random number generator.
In a post-quantum world, a CSPRNG that relies on prime factorisation (such as an RSA generator) would be compromised, so it obviously should not be used in an implementation of this protocol.
%TODO ref to slides / RSA generator?
Alternatively, Alice could generate truly random bits quantum-mechanically, for example by measuring rectilinearly polarised photons with a diagonal filter.

\subsection{Disadvantages of quantum key distribution}
\subsubsection{Theoretical attacks}
More sophisticated attacks involve quantum phenomena that have not yet been demonstrated in practice but exist as theoretical thought experiments. If an adversary could clone or multiply photons, he could measure them in multiple bases and keep the correct results after Alice and Bob reveal the correct bases on the public channel.

\subsection{Disadvantages of the quantum remote coin-flip protocol}
\subsubsection{Need for quantum hardware}
%TODO probably for general to both
Even though a version of the quantum remote coin-flip has been performed experimentally by researchers at the Laboratory for Communication and Processing of Information (LTCI) in Paris~\cite{pappa2014experimental}, there is no prediction for when quantum hardware will become widely available. This is also good news, because it means that for now we can still rely on computational hardness to safeguard the classical solution. More generally, one could state that quantum solutions will become useful on the same day that classical solutions are compromised.

\subsubsection{Possibility of cheating}
\label{sec:cheating-epr}
In theory, Alice can cheat by using the Einstein-Podolsky-Rosen effect, which implies that pairs of polarised photons can be created that always collapse to opposite directions (regardless of the basis in which they are measured). Alice could encode the bit-string in such pairs, send one photon of each pair to Bob and keep the other. If Bob then guesses, say, rectilinear, she can measure her own photons in the diagonal basis, thus being able to match the results of Bob's diagonal table, pretending to have used diagonal encoding all along and verifying her `win'. However, storing and measuring the twin photons with 100\% accuracy is likely impossible in practice, even with good quantum hardware.

\section{Implementation of a quantum remote coin-flip protocol simulation}
For those who can't wait until quantum computers become commercially available, the Microsoft Q\#~\cite{qsharp} project provides a quantum development kit for expressing quantum algorithms and running simulations of them on a classical machine. With this, we implemented (a simulation of) the quantum remote coin-flip protocol~\cite{quantum-coin-toss-repo}. The end-product is a simulation in two ways: first, obviously, no actual qubits are used; secondly, since Q\# does not provide a way to simulate a quantum channel, the communication between Alice and Bob takes place within a single process. To implement the protocol, we need to translate photons, polarisation filters, etc.\ over to the world of quantum computing.

\subsection{Qubits and operating on them}
Photons are one way to achieve quantum behaviour, but in quantum computing, the implementation (be it photons or otherwise) is abstracted away behind the concept of qubits, which is founded on linear algebra. A qubit is represented as a unit vector $\vec{q}=\begin{pmatrix} \alpha \\ \beta\end{pmatrix}$ where $\alpha, \beta \in \mathbb{C}$ and $||\vec{q}||=1$.
Here we have four examples of qubits, along with their more convenient Dirac notations:
\begin{equation}
\label{eq:qubits}
\begin{pmatrix} 1 \\ 0\end{pmatrix} = \ket{0}, \quad
\begin{pmatrix} 0 \\ 1\end{pmatrix} = \ket{1}, \quad
\begin{pmatrix} \frac{1}{\sqrt{2}} \\ \frac{1}{\sqrt{2}}\end{pmatrix} = \ket{+}, \quad
\begin{pmatrix} \frac{1}{\sqrt{2}} \\ \frac{-1}{\sqrt{2}}\end{pmatrix} = \ket{-}.
\end{equation}
Qubits can be manipulated using linear transformations, such as the negation operation $X$ and the Hadamard gate operation $H$:
\begin{equation}
X = \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix}, \quad
H = \begin{pmatrix} \frac{1}{\sqrt{2}} & \frac{1}{\sqrt{2}} \\ \frac{1}{\sqrt{2}} & \frac{-1}{\sqrt{2}} \end{pmatrix}.
\end{equation}
With simple linear algebra we can easily transform the qubits in equation~\ref{eq:qubits} into one another using $X$ and $H$:
\begin{equation}
\label{eq:operations}
X(\ket{0}) = \ket{1}, \quad H(\ket{0})=\ket{+}, \quad H(\ket{1})=\ket{-}.
\end{equation}
Since qubits are (unit) vectors, they can be seen as points on the surface of a unit sphere, the so-called Bloch sphere~\cite{bloch1946nuclear}. For our purposes, we don't need qubits with imaginary numbers, so we stay within one slice of the sphere: a unit circle. In figure~\ref{fig:unit-circle}, we can see the effects of using $X$ and $H$ on qubits in different states.
\begin{figure}
\centering
\includegraphics[width=\linewidth]{microsoft-quantum-coding-unit-circle}
\caption{The unit circle state machine, displaying the effects of $X$ and $H$ operations. Taken from the supporting slides of a Microsoft talk: \emph{`Quantum Computing for Computer Scientists'}~\cite{quantum-computing-talk}.}
\label{fig:unit-circle}
\end{figure}

\subsection{Measuring qubits}
The $\alpha$ and $\beta$ values determine the superposition or quantum state of the qubit, but we cannot obtain these values directly. Instead, we have to measure $\vec{q}$, and the only possible outcomes are either $0$ or $1$, i.e.\ a classical bit (or cbit). The result of a measurement $M$ is probabilistic, and depends on the values of $\alpha$ and $\beta$:
\begin{equation}
\label{eq:measuring-distribution}
P(M=0) = |\alpha|^2, \quad P(M=1) = |\beta|^2.
\end{equation}
Just like with photons, the basis in which we measure can be varied, so we have to specify that too: in this case using the Pauli measurements. Two Pauli measurements are of importance: Pauli Z and Pauli X. When measuring Pauli Z, $\ket{0}$ always results in~$0$ and $\ket{1}$ always in~$1$. However, when measuring Pauli Z of either~$\ket{+}$ or~$\ket{-}$, we get a uniformly random outcome of $0$ or $1$. This is in line with equation~\ref{eq:measuring-distribution} above. With Pauli X, things are turned around: measuring Pauli X of~$\ket{+}$ and~$\ket{-}$ deterministically leads to~$0$ and~$1$ (respectively), but when measuring Pauli X of~$\ket{0}$ and~$\ket{1}$ there is a 50\% chance of~$0$ and a 50\% chance of~$1$. As a side note, measuring Pauli X of~$\vec{q}$ is equivalent to first performing~$H$ and then measuring Pauli Z. Just like in other quantum systems, no re-measurement is possible (the qubit is said to have collapsed to one of two states) and no cloning is possible (which prevents statistical analysis of a single qubit).
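To make this behaviour concrete, the following minimal Q\# sketch prepares a qubit in the $\ket{+}$ state and measures it once in each basis. The operation name is ours and the snippet is an illustration rather than part of the repository code; like the project listing further below, it omits the surrounding namespace and \texttt{open} statements and assumes the standard primitive operations ($H$, $M$, \texttt{Measure}, \texttt{Reset}) are available.
\begin{lstlisting}[caption={Illustrative measurement sketch (not taken from the repository)}]
operation MeasurePlusInBothBases () : (Result, Result) {
    mutable xResult = Zero;
    mutable zResult = Zero;
    using (qubit = Qubit()) {
        H(qubit);                                 // prepare |+> from |0>
        set xResult = Measure([PauliX], [qubit]); // Pauli X: always Zero for |+>
        Reset(qubit);
        H(qubit);                                 // prepare |+> again
        set zResult = M(qubit);                   // Pauli Z: uniformly random Zero/One
        Reset(qubit);                             // released qubits must be in |0>
    }
    return (xResult, zResult);
}
\end{lstlisting}
Running this repeatedly on the simulator, the Pauli X result should always come out $0$ (the analogue of a diagonal photon passing a matching diagonal filter), while the Pauli Z result should come out $0$ and $1$ with roughly equal frequency.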
\subsection{From photons to qubits}
With this foundation of quantum computing, we are ready to define the quantum computing equivalent of the quantum remote coin-flip protocol:
\begin{itemize}
\item A photon with a polarisation axis of~$\theta$ degrees can be represented as a qubit with $\alpha=\cos{\theta}$ and $\beta=\sin{\theta}$. Specifically, $\ket{0}$ and $\ket{1}$ are our rectilinearly polarised photons, and $\ket{+}$ and~$\ket{-}$ are the diagonal ones.
\item To encode a bit from a bit-string, we can start from the $\ket{0}$ qubit and use the $X$ and $H$ operations (see equation~\ref{eq:operations}) to reach any of the four polarisations.
\item To measure with a rectilinear or diagonal filter, we can measure Pauli Z or Pauli X, respectively.
\end{itemize}

\subsection{Q\# code sample}
The following Q\# operation performs the encoding, given a $\ket{0}$ qubit, a rectilinear/diagonal basis flag and a 0/1 bit:
\begin{lstlisting}[caption={\texttt{Operations.qs}}]
operation Encode (qubit : Qubit, rectilinear : Bool, bit : Bool) : Unit {
    if (rectilinear && not(bit)) {
        // do nothing, qubit is already |0>
        return (); // since there is no `else if` statement in Q#, we return after each if-statement
    }
    if (rectilinear && bit) {
        X(qubit); // |1>
        return ();
    }
    if (not(rectilinear) && not(bit)) {
        H(qubit); // |+>
        return ();
    }
    if (not(rectilinear) && bit) {
        X(qubit); // |1>
        H(qubit); // |->
        return ();
    }
}
\end{lstlisting}

\bibliography{tex/sources}
\bibliographystyle{acm}

\end{document}
\section{Wrap-up}
\subsection{}

\begin{frame}
\frametitleTC{Ideas and capabilities to take home}
\framesubtitleTC{(1/2) not necessarily in the same order as we saw them...}
\begin{itemize}[<+-| alert@+>]
\item Interpreting the state space description of a DT LTI dynamic system\\ as scalar equations.
\item Computing a system's transfer function from its state space description.
\item Computing state and output responses in the time domain, and distinguishing\\ free and induced motion.
\item Commonly used signals: impulse, step, ramp.
\end{itemize}
\end{frame}

\begin{frame}
\frametitleTC{Ideas and capabilities to take home}
\framesubtitleTC{(2/2)}
\begin{itemize}[<+-| alert@+>]
\item Poles and zeroes of a transfer function, the former being eigenvalues\\ of the dynamic matrix.
\item Pole/zero cancellations and hidden parts.
\item Writing a dynamic system as a block diagram and recognising the inherent\\ feedback in it.
\item Combining blocks into overall transfer functions: for the moment series and parallel; more on this subject later on.
\item A possible way to transform a transfer function into a state space\\ representation.
\item Stability and eigenvalues of the dynamic matrix (for the curious,\\ we proved our statement by writing that matrix in \TC{Jordan} form).
\end{itemize}
\end{frame}
\documentclass[]{book} \usepackage{lmodern} \usepackage{amssymb,amsmath} \usepackage{ifxetex,ifluatex} \usepackage{fixltx2e} % provides \textsubscript \ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex \usepackage[T1]{fontenc} \usepackage[utf8]{inputenc} \else % if luatex or xelatex \ifxetex \usepackage{mathspec} \else \usepackage{fontspec} \fi \defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase} \fi % use upquote if available, for straight quotes in verbatim environments \IfFileExists{upquote.sty}{\usepackage{upquote}}{} % use microtype if available \IfFileExists{microtype.sty}{% \usepackage{microtype} \UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts }{} \usepackage[margin=1in]{geometry} \usepackage{hyperref} \hypersetup{unicode=true, pdftitle={Real Estate Market Analysis}, pdfauthor={Maciej Beręsewicz}, pdfborder={0 0 0}, breaklinks=true} \urlstyle{same} % don't use monospace font for urls \usepackage{natbib} \bibliographystyle{apalike} \usepackage{longtable,booktabs} \usepackage{graphicx,grffile} \makeatletter \def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi} \def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi} \makeatother % Scale images if necessary, so that they will not overflow the page % margins by default, and it is still possible to overwrite the defaults % using explicit options in \includegraphics[width, height, ...]{} \setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio} \IfFileExists{parskip.sty}{% \usepackage{parskip} }{% else \setlength{\parindent}{0pt} \setlength{\parskip}{6pt plus 2pt minus 1pt} } \setlength{\emergencystretch}{3em} % prevent overfull lines \providecommand{\tightlist}{% \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} \setcounter{secnumdepth}{5} % Redefines (sub)paragraphs to behave more like sections \ifx\paragraph\undefined\else \let\oldparagraph\paragraph \renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}} \fi \ifx\subparagraph\undefined\else \let\oldsubparagraph\subparagraph \renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}} \fi %%% Use protect on footnotes to avoid problems with footnotes in titles \let\rmarkdownfootnote\footnote% \def\footnote{\protect\rmarkdownfootnote} %%% Change title format to be more compact \usepackage{titling} % Create subtitle command for use in maketitle \newcommand{\subtitle}[1]{ \posttitle{ \begin{center}\large#1\end{center} } } \setlength{\droptitle}{-2em} \title{Real Estate Market Analysis} \pretitle{\vspace{\droptitle}\centering\huge} \posttitle{\par} \author{Maciej Beręsewicz} \preauthor{\centering\large\emph} \postauthor{\par} \predate{\centering\large\emph} \postdate{\par} \date{2017-05-09} \usepackage{booktabs} \usepackage{amsthm} \makeatletter \def\thm@space@setup{% \thm@preskip=8pt plus 2pt minus 4pt \thm@postskip=\thm@preskip } \makeatother \begin{document} \maketitle { \setcounter{tocdepth}{1} \tableofcontents } \chapter{Prerequisites}\label{prerequisites} \chapter{Introduction}\label{intro} \chapter{Data sources on real estate market}\label{data-sources} \chapter{Internet data sources}\label{internet-data-sources} \chapter{Characteristics of Real Estate Market in Poland and Europe}\label{characteristics-of-real-estate-market-in-poland-and-europe} \chapter{Descriptives statistics as basic method of analysis of Real Estate Market}\label{descriptives-statistics-as-basic-method-of-analysis-of-real-estate-market} \chapter{Data cleaning techniques and outliers 
detection}\label{data-cleaning-techniques-and-outliers-detection}

\section{Outliers and influence measures}\label{outliers-and-influence-measures}

\textbf{Leverage} given by

\[
h_i = \frac{1}{n} + \frac{(X_i - \overline{X})^2}{\sum_{j=1}^{n}(X_j - \overline{X})^2}
\]

\textbf{Studentized residuals} given by

\[
e_{i}^{*}=\frac{e_i}{S_{e(-i)}\sqrt{1-h_i}}
\]

where: \(e_i\) - residual, \(S_{e(-i)}\) - standard error of the regression without the i-th observation. Studentized residuals follow a t-distribution with \(n-k-2\) degrees of freedom.

\textbf{Cook distance}

\[
D_i=\frac{e_i^2}{(k+1)\,MSE}\cdot\frac{h_i}{(1-h_i)^2}
\]

where \(k\) -- number of independent (explanatory) variables, \(h_i = \frac{1}{n} + \frac{(X_i - \overline{X})^2}{\sum_{j=1}^{n}(X_j - \overline{X})^2}\) , \(MSE=\frac{1}{n}\sum_{i=1}^n(\hat{Y}_i-Y_i)^2\)

Outliers meet:

\[
D_i>\frac{4}{n-k-1}
\]

\textbf{DFBETA} measures the change in the estimates of the regression parameters when we remove one observation

\[
DFBETA_i=(\sum_{i \in s} \mathbf{x}_i\mathbf{x}_i^T)^{-1}\mathbf{x}_i\frac{e_i}{1-\mathbf{x}_i^T(\sum_{i \in s} \mathbf{x}_i\mathbf{x}_i^T)^{-1}\mathbf{x}_i}
\]

\textbf{DFBETAS} - standardised version of DFBETA. Measures influence in units of the standard error of the regression.

\[
DFBETAS_i=\frac{\hat{\mathbf{\beta}}-\hat{\mathbf{\beta}}_{(-i)}}{\sqrt{MSE_{(-i)}}}=\frac{DFBETA_i}{\sqrt{MSE_{(-i)}}}
\]

Outliers meet:

\begin{itemize}
\tightlist
\item
  \(|DFBETAS_i|>2\) - small samples
\item
  \(|DFBETAS_i|>\frac{2}{\sqrt{n}}\) - large samples
\end{itemize}

\textbf{DFFITS} -- measures the global difference between the model with and without the \emph{i}-th observation.

\[
DFFITS_i=\frac{e_i\sqrt{\frac{h_i}{1-h_i}}}{\sqrt{MSE_{(-i)}}\sqrt{{1-h_i}}}
\]

Outliers meet \(|DFFITS_i| > 2\sqrt{\frac{k+1}{n-k-1}}\)

\textbf{CovRatio} -- measures the influence on the variance of the regression coefficients

\[
COVRATIO_i=\frac{1}{(\frac{n-k-2+t_i^2}{n-k-1})^{k+1}}\frac{1}{(1-h_i)}
\]

where \(h_i\) is the same as in Cook's distance and \(t_i\) is defined as

\[
t_i=\frac{e_i}{\sqrt{MSE_{(-i)}}\sqrt{{1-h_i}}}
\]

Interpretation:

\begin{itemize}
\tightlist
\item
  \(COVRATIO_i < 1\) - elimination of the \(i\)-th unit/observation will reduce the standard errors of the regression coefficients
\item
  \(COVRATIO_i > 1\) - elimination of the \(i\)-th unit/observation will increase the standard errors of the regression coefficients
\end{itemize}

It is suggested to use sample-size-dependent thresholds

\[
|COVRATIO_i-1| > 3(k+1)/n
\]

\chapter{Indices}\label{indices}

\chapter{Regression}\label{regression}

\chapter{GIS Tools}\label{gis-tools}

\chapter{Spatial models}\label{spatial-models}

\chapter{Forecasting}\label{forecasting}

\chapter{Placeholder}\label{placeholder}

\chapter{Additional topics}\label{additional-topics}

\bibliography{packages.bib,book.bib}

\end{document}
\documentclass[11pt,letterpaper]{article}
\usepackage{fullpage}
\usepackage[top=1.75cm, bottom=4cm, left=2.5cm, right=2.5cm]{geometry}
\usepackage{amsmath,amsthm,amsfonts,amssymb,amscd}
\usepackage{lastpage}
\usepackage{enumerate}
\usepackage{fancyhdr}
\usepackage{mathrsfs}
\usepackage{xcolor}
\usepackage{graphicx}
\usepackage{listings}
\usepackage{hyperref}
\usepackage{tcolorbox}
\usepackage{bbm}
\usepackage{cite}
\usepackage[numbers]{natbib}
\hypersetup{%
colorlinks=true,
urlcolor=blue,
citecolor=blue
}
\renewcommand\lstlistingname{Algorithm}
\renewcommand\lstlistlistingname{Algorithms}
\def\lstlistingautorefname{Alg.}
\lstdefinestyle{Python}{
language = Python,
frame = lines,
basicstyle = \footnotesize,
keywordstyle = \color{blue},
stringstyle = \color{green},
commentstyle = \color{red}\ttfamily
}
\setlength{\parindent}{0.0in}
\setlength{\parskip}{0.05in}
\newtcolorbox{cbox}[3][]
{
colframe = #2!25,
colback = #2!10,
coltitle = #2!20!black,
title = {#3},
#1,
}
\newcommand\course{CS 674}
\newcommand\instructor{Dr. Wingate}
\newcommand\name{Jake Callahan, Taylor Paskett}
\pagestyle{fancyplain}
\headheight 32pt
\lhead{\name \\ \today}
\chead{ \LARGE \textbf{Project 1 Proposal}}
\rhead{\course \\ \instructor}
\lfoot{}
\cfoot{}
\rfoot{\small\thepage}
\headsep 1.5em

\begin{document}

\section{Description of the Problem}
Artificial neural networks (ANN) are inspired by their biological counterparts. One function of biological neural networks is their ability to store and retrieve memories. What makes them unique is the fact that these memories are stored within the network structure. In comparison, artificial neural network structures approximate functions using their parameters, rather than saving data in them. We wish to design an ANN that also saves memories in its structure.

The way we've thought of accomplishing this is through the use of an invertible variational autoencoder. Because it's invertible, it will learn a bijective map that allows a ``memory'' to be almost perfectly reconstructed. The reconstruction will primarily use information from the network structure. Because the ANN is invertible, its output can be fed backward through the network instead of through a separate decoder.

This problem is interesting for many reasons. We would like to try playing with the latent space variables to create ``false memories'' (a common task with autoencoders, like creating fake celebrity pictures). We are also interested to see if this can be used as a form of data compression. We were able to find a paper from 2019 where the authors implemented an invertible autoencoder, and it showed promising results (see \href{https://doi.org/10.1007/978-3-030-33676-9_31}{[1]}). We believe we can improve their results through use of different loss functions and by varying the architecture.

We are also interested in the information-theoretic applications of an invertible autoencoder. One question we are curious about is whether we can construct some information-theoretic norm such that the INN output is continuous with respect to its latent space variables.

\section{How will Deep Learning Be Used?}
Invertible neural networks (INNs) and variational autoencoders (VAEs) are deep learning architectures that have been the focus of much study. We will design, implement, and train an INN-VAE hybrid architecture.

\section{Data}
We will use CIFAR-10 and CelebA to train and test our INN-VAE. We won't need to collect any data on our own, as we will only use these easily available datasets.
\section{Training}
We hope to train our INN-VAE using Google Colab. If Colab is insufficient, Taylor has access to the BYU supercomputer, so we can use the GPUs there.

\section{Distribution of Labor}
This will be a group project between Jake Callahan and Taylor Paskett. Taylor will train the basic models (similar to the structure from \href{https://doi.org/10.1007/978-3-030-33676-9_31}{[1]}). Jake will train our improved models. We will work together on the theoretical question.

\end{document}