%%% SOME OF THIS CODE IS ADAPTED FROM THE VENERABLE withesis.cls
% COPYRIGHT PAGE
% - To include a copyright page use \copyrightpage
\copyrightpage
% DEDICATION
\begin{dedication}
\emph{For Steve}
\end{dedication}
%% BEGIN PAGESTYLE
%%% You can pick a pagestyle if you want; see the memoir class
%%% documentation for more info. The default ``deposit'' option meets
%%% the UW thesis typesetting requirements but is probably
%%% unsatisfactory for making a version of your dissertation that
%%% won't be deposited to the graduate school (e.g. for web or a nice
%%% printed copy)
\chapterstyle{deposit}
\pagestyle{deposit}
% ACKNOWLEDGMENTS
\begin{acks}
\input{frontmatter/acks}
\end{acks}
% CONTENTS, TABLES, FIGURES
\renewcommand{\printtoctitle}[1]{\chapter*{#1}}
\renewcommand{\printloftitle}[1]{\chapter*{#1}}
\renewcommand{\printlottitle}[1]{\chapter*{#1}}
\renewcommand{\tocmark}{}
\renewcommand{\lofmark}{}
\renewcommand{\lotmark}{}
\renewcommand{\tocheadstart}{}
\renewcommand{\lofheadstart}{}
\renewcommand{\lotheadstart}{}
\renewcommand{\aftertoctitle}{}
\renewcommand{\afterloftitle}{}
\renewcommand{\afterlottitle}{}
\renewcommand{\cftchapterfont}{\normalfont}
\renewcommand{\cftsectionfont}{\itshape}
\renewcommand{\cftchapterpagefont}{\normalfont}
\renewcommand{\cftchapterpresnum}{\bfseries}
%\renewcommand{\cftchapterleader}{}
%\renewcommand{\cftsectionleader}{}
%\renewcommand{\cftchapterafterpnum}{\cftparfillskip}
%\renewcommand{\cftsectionafterpnum}{\cftparfillskip}
% \captionnamefont{\small\sffamily}
% \captiontitlefont{\small\sffamily}
% \renewcommand{\contentsname}{contents}
% \renewcommand{\listfigurename}{list of figures}
% \renewcommand{\listtablename}{list of tables}
\tableofcontents
\clearpage
\listoftables
\clearpage
\listoffigures
\clearpage
% NOMENCLATURE
% \begin{conventions}
% % \begin{description}
% % \item{\makebox[0.75in][l]{term}
% % \parbox[t]{5in}{definition\\}}
% % \end{description}
% \input{conventions}
% \end{conventions}
%% The UW graduate school no longer wants a UMI abstract page
%% Should you need one for some reason, uncomment the following
%% lines. Thanks to Matt Fredrikson for reporting this!
% \advisorname{Gottlob Frege}
% \advisortitle{Professor}
% \begin{umiabstract}
% \input{frontmatter/abstract}
% \end{umiabstract}
\begin{abstract}
\input{frontmatter/abstract}
\end{abstract}
\clearpage\pagenumbering{arabic}
%%% END STUFF TAKEN FROM WITHESIS EXAMPLE FILE
%!TEX root = ../notes.tex
\section{April 4, 2022}
\subsection{Quantum Computing \emph{continued}}
\recall Deutsch's Algorithm. Given a black box that implements $f : \{0, 1\}\to \{0, 1\}$, we define $F : \{0, 1\}^2\to \left\{ 0, 1 \right\}^2$ via $F(x, y) = (x, f(x) + y\pmod{2})$.
The problem was to determine whether $f(0) \overset{?}{=}f(1)$. Our solution was to compute $F$ on a superposition of two states.
\[F\left(\frac{1}{\sqrt{2}}\Ket{0} + \frac{1}{\sqrt{2}}\Ket{1}, \frac{1}{\sqrt{2}}\Ket{0} - \frac{1}{\sqrt{2}}\Ket{1}\right) = \frac{1}{2}\left((-1)^{f(0)}\Ket{0} + (-1)^{f(1)}\Ket{1}\right)\otimes\left( \Ket{0} - \Ket{1} \right)\]
where we apply a rotation to the possible outcomes. Our key transformation is
\[(x, y)\mapsto \left( \frac{1}{\sqrt{2}}x + \frac{1}{\sqrt{2}}y, \frac{1}{\sqrt{2}}x - \frac{1}{\sqrt{2}}y \right)\]
which is a \emph{rotation by $45^\circ$}.
We discuss a generalization of this, the Discrete Fourier Transform.
\begin{definition}[Discrete Fourier Transform]
Given $x_1, x_2, \dots, x_N$, the \ul{Discrete Fourier Transform} (DFT) is a new sequence $y_1, y_2, \dots, y_N$ defined via:
\[y_k = \frac{1}{\sqrt{N}}\sum_{j=1}^{N} e^{-\frac{2\pi i}{N}\cdot jk}x_j\]
\end{definition}
Note that this is the discrete analog of the ordinary Fourier Transform:
\[f(x)\rightsquigarrow \hat{f}(y) = \int e^{2\pi ixy}f(x)\ dx\]
\begin{example}
For $N = 2$, we have $x_1, x_2$ so
\begin{align*}
y_1 & = \frac{1}{\sqrt{2}}x_1 + \frac{1}{\sqrt{2}}x_2 \\
y_2 & = \frac{1}{\sqrt{2}}x_1 - \frac{1}{\sqrt{2}}x_2
\end{align*}
\end{example}
We note that these are efficiently computable in the quantum setting.
\subsection{Shor's Algorithm}
\begin{ques*}
    Given a black box that implements a function $f : \ZZ \to X$ that is assumed to be periodic, what is the period of $f$?
    That is, we are promised that $f(x) = f(x + n)$ for all $x$ and some $n$, and we are tasked with finding $n$.
\end{ques*}
Classically, we take
\[f(1), f(2), \dots, f(n+1)\]
where $f(n+1) = f(1)$ is the first repeated value. The runtime is $\mathcal{O}(n)$.
We try to solve this using a quantum algorithm:
\begin{enumerate}
    \item Choose $N$ to be a large power of $2$ (we want $N = \mathcal{O}(n^2)$).
\item Prepare the state
\[\frac{1}{\sqrt{N}}\sum_{j=1}^N \Ket{j}\otimes \Ket{f(j)}.\]
    \item Apply the DFT to the first register, which yields the state
          \begin{align*}
              \frac{1}{N}\sum_{k, x} \left( \sum_{j: f(j) = x} e^{-\frac{2\pi i}{N}jk} \right)\Ket{k}\otimes\Ket{x}
          \end{align*}
\item Measure first register.
\end{enumerate}
How big is this sum?
\[
\sum_{j: f(j) = x} e^{-\frac{2\pi i}{N}jk}
\]
We note that this sum
\[
\sum_{j: f(j) = x} e^{-\frac{2\pi i}{N}jk} \approx \sum_{j: f(j) = x} e^{-\frac{2\pi i}{N}(j+n)k} = e^{-\frac{2\pi ink}{N}}\cdot \sum_{j: f(j) = x} e^{-\frac{2\pi i}{N}jk}
\]
therefore
\[\sum_{j: f(j) = x} e^{-\frac{2\pi i}{N}jk} \approx 0\quad\text{unless}\quad e^{-\frac{2\pi ink}{N}}\approx 1 \iff nk\text{ is close to a multiple of $N$}\]
This is to say, $k$ is approximately a multiple of $\frac{N}{n}$. So measuring this register yields (a multiple of) $\frac{N}{n}$.
\emph{Why might I get a multiple?}
\begin{example}
Consider sequence
    \[\boxed{0, 1, 0, 1, 0, 1, 0, 0}, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, \dots\]
where the period is $8$. We'll usually get $\approx \frac{N}{\text{period}}$ unless there is a smaller ``almost-period'' where we might get $\frac{N}{\text{almost-period}}$ but the almost-period divides the period.
\end{example}
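This behaviour is easy to check with a purely classical (and therefore exponentially slower) simulation. The following Python sketch is only an illustration and not part of the lecture: it computes the amplitudes $\frac{1}{N}\sum_{j : f(j) = x} e^{-\frac{2\pi i}{N}jk}$ directly and confirms that the likely measurement outcomes $k$ are exactly the multiples of $N/n$.
\begin{verbatim}
# Classical toy simulation of steps 2-4 (not a quantum algorithm):
# for a periodic f, the measured k concentrates on multiples of N/n.
import cmath
from collections import defaultdict

def measurement_distribution(f, N):
    # probability of measuring k in the first register
    buckets = defaultdict(list)          # group the j's by the value f(j)
    for j in range(N):
        buckets[f(j)].append(j)
    prob = [0.0] * N
    for k in range(N):
        for js in buckets.values():
            amp = sum(cmath.exp(-2j * cmath.pi * m * k / N) for m in js) / N
            prob[k] += abs(amp) ** 2
    return prob

N, n = 64, 8                             # N a power of 2, f has period n
prob = measurement_distribution(lambda j: j % n, N)
print(sorted(range(N), key=lambda k: -prob[k])[:n])
# prints the multiples of N/n = 8: [0, 8, 16, 24, 32, 40, 48, 56]
\end{verbatim}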
The runtime of this algorithm is polynomial in $\log n$.
\subsection{Breaking Encryption}
\subsubsection{Integer Factorization}
We're given $N = pq$. We pick $x\in \ZZ/N\ZZ$ and consider the function $f(j) = x^j\pmod{N}$. Applying Shor's Algorithm, we can determine the period of $f$, which is the order of $x$, which is a factor of $(p-1)(q-1)$. More precisely, $\frac{(p-1)(q-1)}{\text{something small}}$. We recover $(p-1)(q-1)$ and we can factor $N$ easily. This is bad news for RSA!
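As a quick sanity check on that last step (this worked example is not from the lecture): writing $\varphi = (p-1)(q-1) = N - (p+q) + 1$, knowing $\varphi$ and $N$ gives $p + q = N - \varphi + 1$, so $p$ and $q$ are the roots of
\[z^2 - (N - \varphi + 1)z + N = 0.\]
For instance, $N = 15$ and $\varphi = 8$ give $p + q = 8$, and $z^2 - 8z + 15 = (z-3)(z-5)$ recovers $p = 3$, $q = 5$.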
\subsubsection{Quantum Elgamal/DLP}
We have some group $G$ and $g\in G$ where we want to recover $k$ from $x=g^k$.
Consider the function $f(a, b) = x^a \cdot g^{-b}$ (we do a 2-dimensional DFT). Then $f(a, b) = f(a+1, b+k)$, so $(1, k)$ is a period of $f$. This solves the discrete log problem \emph{in any group}!
"alphanum_fraction": 0.617899374,
"avg_line_length": 53.2469135802,
"ext": "tex",
"hexsha": "c365fabc7f927c2dd5fe79fbcf5fe1f2b43e1228",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "9784be9e0faa57bbb3c421d8a104daadebf99a2f",
"max_forks_repo_licenses": [
"BSL-1.0"
],
"max_forks_repo_name": "jchen/math1580-notes",
"max_forks_repo_path": "lectures/2022-04-04.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "9784be9e0faa57bbb3c421d8a104daadebf99a2f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSL-1.0"
],
"max_issues_repo_name": "jchen/math1580-notes",
"max_issues_repo_path": "lectures/2022-04-04.tex",
"max_line_length": 359,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "9784be9e0faa57bbb3c421d8a104daadebf99a2f",
"max_stars_repo_licenses": [
"BSL-1.0"
],
"max_stars_repo_name": "jchen/math1580-notes",
"max_stars_repo_path": "lectures/2022-04-04.tex",
"max_stars_repo_stars_event_max_datetime": "2022-02-14T15:03:38.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-02-14T15:03:38.000Z",
"num_tokens": 1686,
"size": 4313
} |
%%
%% Copyright 2007, 2008, 2009 Elsevier Ltd
%%
%% This file is part of the 'Elsarticle Bundle'.
%% ---------------------------------------------
%%
%% It may be distributed under the conditions of the LaTeX Project Public
%% License, either version 1.2 of this license or (at your option) any
%% later version. The latest version of this license is in
%% http://www.latex-project.org/lppl.txt
%% and version 1.2 or later is part of all distributions of LaTeX
%% version 1999/12/01 or later.
%%
%% The list of all files belonging to the 'Elsarticle Bundle' is
%% given in the file `manifest.txt'.
%%
%% Template article for Elsevier's document class `elsarticle'
%% with numbered style bibliographic references
%% SP 2008/03/01
\documentclass[preprint,12pt]{elsarticle}
%% Use the option review to obtain double line spacing
%% \documentclass[authoryear,preprint,review,12pt]{elsarticle}
%% Use the options 1p,twocolumn; 3p; 3p,twocolumn; 5p; or 5p,twocolumn
%% for a journal layout:
%% \documentclass[final,1p,times]{elsarticle}
%% \documentclass[final,1p,times,twocolumn]{elsarticle}
%% \documentclass[final,3p,times]{elsarticle}
%% \documentclass[final,3p,times,twocolumn]{elsarticle}
%% \documentclass[final,5p,times]{elsarticle}
%% \documentclass[final,5p,times,twocolumn]{elsarticle}
%% For including figures, graphicx.sty has been loaded in
%% elsarticle.cls. If you prefer to use the old commands
%% please give \usepackage{epsfig}
%% The amssymb package provides various useful mathematical symbols
\usepackage{amssymb}
%% The amsthm package provides extended theorem environments
%% \usepackage{amsthm}
%% The lineno packages adds line numbers. Start line numbering with
%% \begin{linenumbers}, end it with \end{linenumbers}. Or switch it on
%% for the whole article with \linenumbers.
%% \usepackage{lineno}
\journal{Nuclear Physics B}
\begin{document}
\begin{frontmatter}
%% Title, authors and addresses
%% use the tnoteref command within \title for footnotes;
%% use the tnotetext command for the associated footnote;
%% use the fnref command within \author or \address for footnotes;
%% use the fntext command for the associated footnote;
%% use the corref command within \author for corresponding author footnotes;
%% use the cortext command for the associated footnote;
%% use the ead command for the email address,
%% and the form \ead[url] for the home page:
%% \title{Title\tnoteref{label1}}
%% \tnotetext[label1]{}
%% \author{Name\corref{cor1}\fnref{label2}}
%% \ead{email address}
%% \ead[url]{home page}
%% \fntext[label2]{}
%% \cortext[cor1]{}
%% \address{Address\fnref{label3}}
%% \fntext[label3]{}
\title{}
%% use optional labels to link authors explicitly to addresses:
%% \author[label1,label2]{}
%% \address[label1]{}
%% \address[label2]{}
\author{}
\address{}
\begin{abstract}
%% Text of abstract
\end{abstract}
\begin{keyword}
%% keywords here, in the form: keyword \sep keyword
%% PACS codes here, in the form: \PACS code \sep code
%% MSC codes here, in the form: \MSC code \sep code
%% or \MSC[2008] code \sep code (2000 is the default)
\end{keyword}
\end{frontmatter}
%% \linenumbers
%% main text
\section{}
\label{}
%% The Appendices part is started with the command \appendix;
%% appendix sections are then done as normal sections
%% \appendix
%% \section{}
%% \label{}
%% If you have bibdatabase file and want bibtex to generate the
%% bibitems, please use
%%
%% \bibliographystyle{elsarticle-num}
%% \bibliography{<your bibdatabase>}
%% else use the following coding to input the bibitems directly in the
%% TeX file.
\begin{thebibliography}{00}
%% \bibitem{label}
%% Text of bibliographic item
\bibitem{}
\end{thebibliography}
\end{document}
\endinput
%%
%% End of file `elsarticle-template-num.tex'.
\chapter*{Jian Shi's Individual Contribution Report}
Our capstone project is about ``RISC-V SoC Microarchitecture Design and Optimization''. In this project, we first design an out-of-order RISC-V processor supporting 4-way superscalar execution and dynamic instruction scheduling. In addition, we try to optimize the SoC with approximate computing units. During the project, all team members make their own contributions to the design, and we are satisfied with our final product.
To design and optimize a processor based on the RISC-V ISA, a great deal of effort is needed, from the hardware side to the software side. In our group, Li Shi, Yiqiu Sun and I are responsible for hardware engineering, while Yichao Yuan and Zhiyuan Liu are responsible for software engineering. Every week, we hold two group meetings, on Wednesday and Saturday. The software and hardware sides work together to build our own RISC-V processor, and we make roughly equal contributions to the processor design. Each member's contribution to the project is as follows.
\begin{enumerate}
\item Li Shi focuses on hardware backend design, including instruction dispatch, issue units, the scoreboard, the physical register file and part of the execution units. In addition, he takes part in integrating the microarchitecture and debugging. His workload is roughly 15 hours per week.
\item Yiqiu Sun focuses on designing and integrating the overall microarchitecture. She provides technical support for the team and designs the instruction fetch unit and branch predictor. What's more, she helps other team members review the code, offers many constructive suggestions, and improves the overall design quality. Her workload is roughly 15 hours per week.
\item Yichao Yuan focuses on software simulation and validation. He helps the team validate the processor design with software tools, including Xilinx Vivado, Verilator, Spike, etc. Besides, although not implemented in the final design, he explores the instruction and data cache design and the AXI bus protocol for the SoC. His workload is roughly 15 hours per week.
\item Zhiyuan Liu focuses on the compilation workflow. She builds and modifies compiler toolchains, which is important for embedded system construction. She also adds our custom RISC-V instructions for approximate computing to the compiler so that users can use these custom instructions in their C programs. Her workload is roughly 15 hours per week.
\item I focus on designing the hardware frontend, including the register renaming table, the free list and the re-order buffer. I also design the floating-point units among the execution units in the backend. Besides, during the preparation stage of this project, I am partially responsible for the survey of open-source RISC-V cores and SoCs, and I read some academic articles. My workload is roughly 15 hours per week.
\end{enumerate}
As previously mentioned, I am the hardware engineer in our team, and my job mainly focuses on register transfer level (RTL) design and optimization.
In terms of the technical part, it is my first time designing and testing a System-on-Chip (SoC), which means that I needed to learn many concepts in computer architecture, embedded systems and logic circuit design. In my four-year university study, I only had experience with a five-stage, in-order MIPS CPU design written in Verilog. It is therefore also my first time taking part in the design of a processor core that supports superscalar execution and dynamic instruction scheduling, written in SystemVerilog. For this reason, I studied digital design and circuit simplification extensively. In the evaluation stage, I made a survey of open-source RISC-V cores and SoCs. Besides, to find designs suitable for our SoC, I read many academic articles about approximate computing, floating-point units and FPGA architecture.
One of the main parts of our project is the microarchitecture design, which requires us to choose between design concepts and strike a balance among many aspects. For instance, the register renaming table needs to be recovered when a mis-prediction or an exception happens. There exist two structures to recover it accurately: checkpointing and the retirement rename allocation table (rRAT). The former means that the processor checkpoints the table when a branch instruction is renamed, while the latter just updates the rRAT when an instruction is retired. In comparison, the checkpoint-based method requires a larger circuit but provides faster recovery. We first implemented checkpointing in our processor. However, the final verification results showed that the larger circuit leads to timing violations in the FPGA implementation. Therefore, I replaced it with the rRAT approach.
In terms of the technical support part, I also learn a lot from this project. We use my central server for circuit verification and implementation. The central server is much more powerful than our personal computers and can handle many computing tasks, such as compilation and logic synthesis. I am responsible for maintaining the server and providing timely technical support. For example, I deploy a virtual network console (VNC) on the server so that we can share the same graphical user interface (GUI) during the project. Besides, I also teach my groupmates to use Xilinx Vivado as a tool for logic synthesis and implementation. To share and manage our source code, we use Git and create an organization on GitHub. In the \texttt{UMJI-VE450-21SU/Ria} repository on GitHub, I have my own working branches, including \texttt{Jian/Rename}, \texttt{Jian/ROB}, \texttt{Jian/FloatingPoint}, etc.
In terms of the technical communication part, we have roughly the same workload for each design review, the final report and the final expo. In design review 1, I do the literature search and use the results as a benchmark for our design. In design review 2, I introduce the concept of an instruction set architecture (ISA) and compare the ARMv7 ISA with the RISC-V 32G ISA. In design review 3, I compare our measured values with the target values in the engineering specification, and I also point out our design oversights in the engineering specification part. In the final expo, I take part in the oral presentation and the JI design expo. Besides, I am responsible for editing the promotional video. I devote myself to presenting an easy-to-understand and engaging project to the audience so that they can be inspired by our design and give us useful feedback for further improvement.
All in all, as a hardware engineer in this project, I have exploited my potential and made my own contribution to the project of ``RISC-V SoC Microarchitecture Design and Optimization''. We cooperate as a team and design a complex processor based on the RISC-V ISA.
\subsection{Crypto Functions}
The first thing that we define is the set of cryptographic functions that will be needed to build the cryptocurrency.
Messages can be defined in multiple ways: as an array of bytes, as a string, or as a natural number.
A message in this context simply means some data.
A private key is a number, a secret that someone holds.
In Bitcoin, the private key is a 256-bit number.
A private key is used to sign messages.
The public key is generated from the private key,
but recovering the private key from a public key is computationally hard.
To verify who signed a message with a private key, the signer has to reveal the corresponding public key.
A hash function behaves almost like an injection (the probability of two different inputs having the same hash is very low), even though it maps a large domain to a small one.
For example, the hash of a big file (several gigabytes) is an integer of just a few bytes.
It is very useful, for example, to check that two files are equal:
if the hashes of two files are equal, the probability that the files are equal is very high.
This is used in torrent clients, where it is safe to download a program from untrusted peers:
one only has to verify that the hash of the downloaded file is equal to the hash of the wanted file.
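As a concrete illustration of this use of hashes, the following Python sketch checks a downloaded file against a hash published by a trusted source. It is only illustrative and is not part of the Agda development; the file name and the trusted hash are placeholders.
\begin{verbatim}
# Verify a downloaded file against a hash published by a trusted source.
import hashlib

def sha256_of_file(path):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            h.update(chunk)
    return h.hexdigest()

trusted_hash = "..."  # published by a trusted source
if sha256_of_file("downloaded-program.bin") == trusted_hash:
    print("hashes match: almost certainly the wanted file")
else:
    print("hash mismatch: do not trust this file")
\end{verbatim}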
These functions could be defined concretely, but that is not the purpose of this thesis,
so they will just be given as postulates.
\agda{Crypto}{cryptoPostulates}
A wide variety of physical phenomena are modeled in the space-time domain by partial differential equations. The purpose of this section is to review generalities about PDEs and suited strategies depending on the nature of the equations so that solutions can be derived.
\subsection{General concepts}
A system of partial differential equations can be written by means of a vector operator $\vect{\Gc}$ of independent and dependent variables $(x_1,...,x_N)$ and $(u_1,...,u_I)$:
\begin{equation}
\label{eq:diff_operator}
\vect{\Gc}\(x_1,...,x_N,u_1,...,u_I,\drond{u_1}{x_1},..., \drond{^Mu_I}{x_N^M}\) = \vect{0}
\end{equation}
The dimension of the system is given by the size $I$ of the array $\vect{\Uc}^T=[u_1,...,u_I] \in \Rbb^I$, referred to as the \textit{unknown vector}. The highest derivative of the unknown vector in the system defines the \textit{order of the system} $M$. In equation \eqref{eq:diff_operator} and in what follows, sans-serif symbols refer to matrices while calligraphic symbols stand for column arrays. Furthermore, the partial derivatives of a quantity $u$ with respect to a variable $x$ may be written $u_x$ when there is no ambiguity. Making use of index notation and the convention of implicit summation over repeated indices, a system of partial differential equations reads:
\begin{equation*}
\sum_{k=1}^{N} \sum_{p=1}^{M}\Asf_{ij}^p \drond{^p\Uc_j}{x_k^p} + \Sc_i = 0
\end{equation*}
or equivalently, in matrix form:
\begin{equation}
\label{eq:diff_system_matrix}
\sum_{k=1}^{N}\sum_{p=1}^{M}\tens{\Asf}^p \drond{^p\vect{\Uc}}{x_k^p} + \vect{\Sc} = \vect{0}
\end{equation}
Coefficients matrices $\tens{\Asf}^p$ and the vector $\vect{\Sc}$ may depend on independent variables and the unknown vector ($x_1,...,x_N,\vect{\Uc}$) leading to different types of partial differential systems. Namely, whether those terms are functions of the $x_k$ or not leads respectively to \textit{linear systems with variable coefficients} or to \textit{linear systems with constant coefficients}. The system remains \textit{linear} if $\vect{\Sc}$ depends linearly on $\vect{\Uc}$, and is \textit{semi-linear} if the relation is non-linear. Finally, if $\tens{\Asf}^p$ depends on the vector $\vect{\Uc}$ and its derivatives up to order $M-1$, the system is called \textit{quasi-linear}.
The \textit{Cauchy problem} consists in finding a solution $\vect{\Uc}$ of system \eqref{eq:diff_system_matrix} that satisfies a set of given prescribed values. Geometrically speaking, the solution of such a problem can be seen as the building of a hyper-surface of $\Rbb^{I+N}$, hence the term \textit{integral surface} for $\vect{\Uc}$. Such a problem can be reduced to that of solving a first-order Cauchy problem by using suitable changes of variables \cite[p.54]{PDEs}; we will therefore focus on first-order PDEs.
\subsection{Notion of characteristics -- Hyperbolic problems}
The theorem of \textit{Cauchy--Kowalewski} locally ensures the existence of solutions of a Cauchy problem for partial differential systems and is based on the restrictive requirement of analytic coefficient matrices and initial data (see \cite[p.46]{PDEs}). The case of first-order systems however, only requires continuity and differentiability conditions and is based on the concept of \textit{characteristics}, which makes the development of a solution more flexible.
\subsubsection*{First-order quasi-linear equations}
To illustrate the aforementioned notions, we consider the first-order quasi-linear PDE with independent variables $x$ and $t$:
\begin{equation}
\label{eq:1st_order_pde}
a u_x + b u_t = c
\end{equation}
where coefficients $a$ and $b$ are such that $a^2 + b^2 \neq 0$. Given values of $u$ are prescribed along a curve defined by means of a parameter $\eta$ in the $(x,t)$ plane as $\Cscr_0:(x(\eta),t(\eta))$, so that $u(x(\eta),t(\eta))$ draws a curve $\Cscr$ in the space $(x,t,u)$.
%In what follows, $\Cscr$ is referred to as the \textit{initial curve}.
We assume that $\Cscr_0$ is regular, namely $\ddroit{x}{\eta}^2 + \ddroit{t}{\eta}^2 \neq 0$, and that one of the derivatives, say $\ddroit{t}{\eta}$, does not vanish. Figure \ref{fig:initial_curve} shows an example of such an \textit{initial curve} $\Cscr$ prescribing values of $u$ along a parametrized curve of the $(x,t)$ plane.
%The value of $u$ on $\Cscr$ being used as a starting point for the solution of the Cauchy problem, $\Cscr$ is referred to as the \textit{initial curve}.
\begin{figure}[h]
\centering
\input{chapter2/pgfFigures/initialCurve}
\caption{Example of initial curve $\Cscr$ in the $(x,t,u)$ space and its projection $\Cscr_0$ in the $(x,t)$ plane.}
\label{fig:initial_curve}
\end{figure}
With data given along $\Cscr$, the Cauchy problem is equivalent to that of finding a surface $u(x,t)$ that contains the initial curve and satisfies \eqref{eq:1st_order_pde}.
Thus, one seeks the partial derivatives $u_x$ and $u_t$ of $u$ on $\Cscr$ in order to extend the given data into a strip in the neighborhood of the initial curve.
The total derivative of $u$ along $\Cscr$ being:
\begin{equation}
\label{eq:dtot_u}
\ddroit{u}{\eta}=u_x\ddroit{x}{\eta}+u_t\ddroit{t}{\eta}
\end{equation}
one gets, after dividing the previous equation by $\ddroit{t}{\eta}$, the following relation between the partial derivatives of $u$ on the initial curve:
\begin{equation*}
u_t= \ddroit{u}{t} - u_x\ddroit{x}{t}
\end{equation*}
Then, equation \eqref{eq:1st_order_pde} can be rewritten as:
\begin{equation}
\label{eq:normal_form_pde}
(a - b\ddroit{x}{t})u_x = c - b\left.\ddroit{u}{t}\right|_{t\in\Cscr_0}
\end{equation}
The right-hand side of equation \eqref{eq:normal_form_pde} is known along $\Cscr$ so that the Cauchy problem admits a unique solution $u_x$ if and only if:
\begin{equation}
\label{eq:non-characteristics}
\ddroit{x}{t}\neq \frac{a}{b}
\end{equation}
An initial curve satisfying the condition \eqref{eq:non-characteristics} is a \textit{non-characteristic curve} and enables to uniquely determine a solution of the Cauchy problem. On the other hand, an initial curve defined such that $\ddroit{x}{t} = \frac{a}{b}$ is a \textit{characteristic curve} and yields infinitely many solutions \cite[p.65]{Courant}.
%A \textit{non-characteristic curve} is an initial curve satisfying the condition \eqref{eq:non-characteristics}, otherwise it is a \textit{characteristic curve}.
\subsubsection*{Geometrical representation of characteristic curves}
Consider a partial differential equation of the form \eqref{eq:1st_order_pde}, and prescribed values of $u$ along a characteristic curve $\Cscr$. Since one cannot find a unique solution of the Cauchy problem in this case, infinitely many integral surfaces $u^{(i)}(x,t)$ with normal vectors $\vect{n}^{(i)}=[u^{(i)}_x,u^{(i)}_t,-1]$ can intersect $\Cscr$. Those integral surfaces satisfy equation \eqref{eq:1st_order_pde} and hence, $\vect{n}^{(i)}\cdot \vect{w}=0$ where $\vect{w}=[a,b,c]$, so that the set of tangent planes to solutions $u^{(i)}(x,t)$ forms a fan whose axis is $\vect{w}$.
This situation is depicted in figure \ref{fig:plan_fan} for which an initial characteristic straight line $\Cscr$ is contained by integral surfaces satisfying a PDE of the form \eqref{eq:1st_order_pde}.
%Let $u(x,t)$ be a surface in the $(x,t,u)$ space which normal vector is $\vect{n}=[u_x,u_t,-1]$. This surface is an integral surface if it is a solution of equation \eqref{eq:1st_order_pde} and hence, if $\vect{n}\cdot \vect{w}=0$ with $\vect{w}=[a,b,c]$.
%This situation is depicted in figure \ref{fig:plan_fan} for which an initial characteristic straight line $\Cscr$ is contained by integral surfaces satisfying some pde of the form \eqref{eq:1st_order_pde}.
% Figure \ref{fig:plan_fan} shows a collection of integral surfaces for some partial differential equation \eqref{eq:1st_order_pde} with prescribed $u$ along a straight line $\Cscr$.
% Then, the set of tangent planes to those solutions $u^{(i)}(x,t)$ forms a fan of planes which axis is $\vect{w}$.
\begin{figure}[h!]
\centering
\input{chapter2/pgfFigures/plan_fan}
\caption{Examples of integral surfaces passing through the same curve $\Cscr$ defined such that $t=\mathrm{constant}$ and $u=\mathrm{constant}$ along $\Cscr$.}
\label{fig:plan_fan}
\end{figure}
$\newline$
\textit{Characteristic line elements}, tangent to all integral surfaces $u^{(i)}(x,t)$ are then defined as:
\begin{equation}
\label{eq:monge_axis}
\matrice{dx \\ dt \\ du} = \matrice{a \\ b \\c}
\end{equation}
Introduction of a parameter $\eta$ and integration of equation \eqref{eq:monge_axis} yield a one-parameter family of characteristic curves of the PDE:
\begin{equation*}
x=x(\eta) \quad ; \quad t=t(\eta) \quad ; \quad u=u(\eta)
\end{equation*}
Hence, a characteristic curve is tangent at every point to all the integral surfaces, and an infinity of integral surfaces cross one characteristic curve. As a consequence, if the initial curve is a characteristic curve, infinitely many integral surfaces contain it so that the Cauchy problem cannot be solved.
However, the following statement holds \cite[p.63]{Courant}:
\begin{theorem}[Courant]
\label{th:integral_surface_generated}
Every surface $u(x,t)$ generated by a one-parameter family of characteristic curves is an integral surface.
Conversely, every integral surface is generated by a one-parameter family of characteristic curves.
\end{theorem}
This theorem will be used in what follows to solve the Cauchy problem.
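As a simple illustration of theorem \ref{th:integral_surface_generated} (this worked example is added here for clarity and follows directly from equation \eqref{eq:monge_axis}), consider equation \eqref{eq:1st_order_pde} with constant coefficients $a$, $b\neq 0$ and vanishing right-hand side, $au_x + bu_t = 0$. The characteristic line elements give $\ddroit{x}{\eta} = a$, $\ddroit{t}{\eta} = b$ and $\ddroit{u}{\eta} = 0$, so the characteristic curves are the straight lines $x - \frac{a}{b}t = \mathrm{constant}$, along which $u$ is constant. If initial data $u(x,0) = u_0(x)$ are prescribed on the line $t = 0$, which is non-characteristic provided $b \neq 0$, the integral surface generated by this one-parameter family of characteristics is
\begin{equation*}
  u(x,t) = u_0\left(x - \frac{a}{b}t\right)
\end{equation*}
that is, the initial profile is merely transported at the speed $\frac{a}{b}$.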
%\subsection{The method of characteristic}
\subsubsection*{First-order quasi-linear systems}
The concept of characteristic curves is now extended to first-order quasi-linear systems of dimension $I$.
Consider the following system written in matrix form:
\begin{equation}
\label{eq:1st_order_quasi-linear_syst}
\Absf^t\(x,t,\vect{\Uc}\) \: \vect{\Uc}_t + \Absf^x\(x,t,\vect{\Uc}\)\: \vect{\Uc}_x + \vect{\Sc} = \vect{0}
\end{equation}
Similarly to quasi-linear PDEs, given values of $\Ucb$ are prescribed along a regular curve $\Cscr_0:(x(\eta),t(\eta))$ defining an initial curve $\Ucb(x(\eta),t(\eta))$ of the $(x,t,\Ucb)$ space. The Cauchy problem consists in finding all the derivatives of $\Ucb(x,t)$ such that equation \eqref{eq:1st_order_quasi-linear_syst} is satisfied in the vicinity of $\Cscr$.
Making use of the total derivative of $\Ucb$ along the initial curve:
\begin{equation}
\label{eq:dtot_Uvect}
\ddroit{\Ucb}{\eta}=\Ucb_x\ddroit{x}{\eta}+\Ucb_t\ddroit{t}{\eta} \quad \rightarrow \quad \Ucb_t = \left.\ddroit{\Ucb}{t}\right|_{t\in\Cscr_0} - \Ucb_x \ddroit{x}{t}
\end{equation}
system \eqref{eq:1st_order_quasi-linear_syst} can be rewritten:
\begin{equation}
\label{eq:normal_form}
\( \Absf^x - \lambda \Absf^t \) \vect{\Uc}_x + \Scb + \Absf^t\left.\ddroit{\Ucb}{t}\right|_{t\in\Cscr_0} = \vect{0}
\end{equation}
where:
\begin{equation}
\label{eq:lambda_slope}
\lambda=\ddroit{x}{t}
\end{equation}
With $\Scb$ and $\Absf^t\left.\ddroit{\Ucb}{t}\right|_{t\in\Cscr_0}$ known along $\Cscr_0$, the Cauchy problem admits a unique solution $\vect{\Uc}_x$ along $\Cscr$ if the determinant of the system does not vanish, that is:
\begin{equation}
\label{eq:characteristic_determinant}
D=\abs{\Absf^x - \lambda \Absf^t} \ne 0
\end{equation}
where $D$ is called the \textit{characteristic determinant} of system \eqref{eq:1st_order_quasi-linear_syst}. If $D$ does not have real roots along $\Cscr_0$, the problem is said to be \textit{elliptic} and the Cauchy problem can be solved. Indeed, in that case the knowledge of $\Ucb$ along the initial curve allows the computation of derivatives and hence, the building of an integral strip defined by $\Ucb,\Ucb_x,\Ucb_t$. If, on the other hand, the characteristic determinant admits $I$ real roots, system \eqref{eq:normal_form} can no longer be solved. Those eigenvalues come along with left and right eigenvectors respectively defined as:
\begin{equation}
\label{eq:eigenvectors}
\Lc^k_i \Asf^x_{ij} = \lambda_k \Lc^k_i \Asf^t_{ij} \quad ; \quad \Asf^x_{ij}\Rc^k_j = \lambda_k \Asf^t_{ij}\Rc^k_j \qquad k=1,...,I
\end{equation}
\begin{remark}
Note that eigenvectors can be stored as matrices $\Rbsf$ and $\Lbsf$ where $\Rsf_{ij}=\Rc^j_i$ and $\Lsf_{ij}=\Lc_j^i$.
\end{remark}
\begin{definition}
\label{def:hyperbolic_system}
  A first-order system of $I$ partial differential equations is said to be \textbf{hyperbolic} if it admits real eigenvalues and $I$ independent eigenvectors \cite{Courant}.
For those problems, one can draw a set of one-parameter families of characteristic curves $\varphi^k$ in the $(x,t)$ plane by integrating the relations $\lambda_k=dx/dt$ ($1 \leq k \leq I$).
\end{definition}
\begin{example}
\label{ex:charac1}
Consider the first-order system with variable coefficients
\begin{equation*}
\matrice{x &0 \\0 &-x} \drond{}{t} \matrice{\Uc_1 \\ \Uc_2} + \drond{}{x}\matrice{\Uc_1 \\ \Uc_2} = \matrice{0 \\0}
\end{equation*}
whose characteristic determinant \eqref{eq:characteristic_determinant} is:
\begin{equation*}
(1-\lambda x)(1+\lambda x)=0
\end{equation*}
We thus have two solutions $\lambda_{1,2}=\pm 1/x$ leading, by integration of \eqref{eq:lambda_slope}, to two one-parameter families of characteristic curves:
\begin{equation*}
t_1(x)=\frac{1}{2}x^2+c_1 \quad \text{and} \quad t_2(x)=-\frac{1}{2}x^2+c_2
\end{equation*}
Those curves are drawn in figure \ref{fig:exampleCharac}\subref{subfig:curve_lines} for several values of integration constants $c_1$ and $c_2$.
\end{example}
\begin{example}
\label{ex:charac2}
Consider now the first-order system with constant coefficients
\begin{equation*}
\matrice{1 &0 \\0 &2} \drond{}{t} \matrice{\Uc_1 \\ \Uc_2} + \drond{}{x}\matrice{\Uc_1 \\ \Uc_2} = \matrice{0 \\0}
\end{equation*}
whose eigenvalues, according to equation \eqref{eq:characteristic_determinant} satisfy
\begin{equation*}
(1 - \lambda )(1- 2\lambda)=0
\end{equation*}
Two real roots exist $\lambda_1=1 \: ; \: \lambda_2=1/2$, leading by integration of \eqref{eq:lambda_slope} to two one-parameter families of straight lines:
\begin{equation*}
t_1(x)=x+c_1 \quad \text{and} \quad t_2(x)=2x+c_2
\end{equation*}
  Unlike example \ref{ex:charac1}, the coefficient matrices do not depend on the independent variables, thus yielding characteristic straight lines in the $(x,t)$ plane (see figure \ref{fig:exampleCharac}\subref{subfig:straight_lines}).
\end{example}
\begin{figure}[h]
\centering
\subcaptionbox{Example \ref{ex:charac1}: $\lambda_{1,2}=\pm 1/x$\label{subfig:curve_lines}}{\input{chapter2/pgfFigures/exampleCharac}}
\subcaptionbox{Example \ref{ex:charac2}: $\lambda_{1}=1 \:\text{and} \: \lambda_2=1/2$\label{subfig:straight_lines}}{\input{chapter2/pgfFigures/exampleCharac_const}}
\caption{Family of characteristic curves corresponding to the eigenvalues of the first-order systems given in examples \ref{ex:charac1} and \ref{ex:charac2}.}
\label{fig:exampleCharac}
\end{figure}
\subsection{The method of characteristics}
As theorem \ref{th:integral_surface_generated} states, an integral surface is generated by a one-parameter family of characteristic curves. Therefore the knowledge of those curves enables the building of the solution of the Cauchy problem. Indeed, the projection of the quasi-linear system \eqref{eq:1st_order_quasi-linear_syst} onto the \textit{left eigenbasis} or \textit{left characteristic basis} leads to:
\begin{equation*}
\vect{\Lc}^k \( \Absf^t \vect{\Uc}_t + \Absf^x\vect{\Uc}_x \) + \vect{\Lc}^k \vect{\Sc}= \vect{0}
\end{equation*}
%Introduction of the definition of left eigenvectors \eqref{eq:eigenvectors} then yields:
where $\Lcb^k$ satisfies \eqref{eq:eigenvectors}, and hence:
\begin{equation*}
\vect{\Lc}^k \Absf^t \( \vect{\Uc}_t +\lambda_k \vect{\Uc}_x \) + \vect{\Lc}^k \vect{\Sc}=\vect{0}
\end{equation*}
In this equation, the \textit{directional derivative} of $\vect{\Uc}$ along the $k$th characteristic curve $\varphi^k$ arises, namely:
\begin{equation*}
\left.\ddroit{\Ucb}{t}\right|_{t\in\varphi^k} = \Ucb_t + \lambda_k \Ucb_x
\end{equation*}
Thus, along a characteristic curve a system of partial differential equations reduces to a system of \textit{Ordinary Differential Equations} (ODEs) composed of the following \textit{characteristic equations}:
\begin{equation}
\label{eq:PDEs_ODEs}
\vect{\Lc}^k \(\Absf^t \ddroit{\Ucb}{t} + \Scb \)=\vect{0}
\end{equation}
Integration of equations \eqref{eq:PDEs_ODEs} yields a set of \textit{integral curves} from which the Cauchy problem can be solved.
%It then comes out that the Cauchy problem can be solved as the system \eqref{eq:PDEs_ODEs}.
Indeed, the solution at a point of the $(x,t)$ plane can be determined by tracing backward the characteristic curves to the initial curve and integrating ODEs \eqref{eq:PDEs_ODEs} along those paths according to the \textit{method of characteristics}.
Note that if $\Scb$ is zero, then $\Ucb$ is constant along characteristic curves.
To illustrate the method, let us consider again the quasi-linear system of example \ref{ex:charac1}, for which the Cauchy problem is built by prescribing initial conditions along the $x$-axis. Note that ``initial conditions'' now have a physical meaning since they are defined at $t=0$; the Cauchy problem is then an \textit{Initial Value Problem (IVP)}. Through a point $(x^*,t^*)$ pass two characteristic curves, each belonging to a different one-parameter family. The solution at this point can be determined by integrating the ODE corresponding to the first (\textit{resp. second}) eigenvalue of the system between $(x^1,0)$ (\textit{resp. $(x^2,0)$}) and $(x^*,t^*)$. The singularity of hyperbolic problems can hence be circumvented by using the characteristic structure in order to determine a unique solution.
\begin{figure}[h]
\centering
\input{chapter2/pgfFigures/characMethod2x2}
\caption{Domain of dependence of the solution at $(x^*,t^*)$ for the system of example \ref{ex:charac1}.}
\label{fig:charac_method2x2}
\end{figure}
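For completeness, the backward tracing described above can be written down explicitly in the simpler setting of example \ref{ex:charac2}, where $\Scb=\vect{0}$ and the two equations decouple, so that each component is constant along its own family of characteristic straight lines. The following short Python sketch (an illustration only, with arbitrarily chosen initial data) evaluates the solution at a point $(x^*,t^*)$ by tracing each characteristic back to the initial line $t=0$:
\begin{verbatim}
# Method of characteristics for the constant-coefficient system of
# Example 2: each component is constant along its characteristic,
# whose foot on the initial line t = 0 is x - lambda_k * t.
import math

lambdas = [1.0, 0.5]        # eigenvalues of the system of Example 2

def u1_0(x):                # initial data (chosen arbitrarily here)
    return math.exp(-x**2)

def u2_0(x):
    return math.sin(x)

def solution(x, t):
    feet = [x - lam * t for lam in lambdas]
    return (u1_0(feet[0]), u2_0(feet[1]))

print(solution(1.5, 2.0))   # traces back to x = -0.5 and x = 0.5
\end{verbatim}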
We see that only a segment of the initial curve has an influence on the solution at a given point. Namely, the intersections of the initial curve and characteristic curves with the highest and the lowest slopes define the \textit{domain of dependence} of the solution at this point (see figure \ref{fig:charac_method2x2}). This property of hyperbolic problems implies the existence of waves that propagate information at finite speeds corresponding to the eigenvalues of a quasi-linear form. The theory presented so far will be applied in what follows to solid mechanics.
%%% Local Variables:
%%% mode: latex
%%% ispell-local-dictionary: "american"
%%% TeX-master: "../mainManuscript"
%%% End:
%----------------------------------------------------------------------------
\chapter{Implementation}
\label{cha:implementation}
%----------------------------------------------------------------------------
At the end of the design phase, it is clearly visible what advantages the investigated technologies and tools have. To adopt the chosen technologies, I chose to use the Java programming language. For model-driven engineering, the Eclipse platform offers the best tools, which is why I used the Eclipse Modeling Tools to implement the application.
\begin{figure}[htp]
\centering
\includegraphics[scale=0.5]{figures/testgenerator_architecture}
\caption{Architecture of the test generator framework}
\label{fig:testgenerator_architecture}
\end{figure}
The test case generation process consists of the following steps (see Figure~\ref{fig:testgenerator_architecture}):
\begin{enumerate}
\item First, the instance model has to be created with the default PLC-HSM model editor generated by the Eclipse Modeling Framework. The model can use all the PLC-HSM model features except timed transitions.
\item The next step is to create the Alloy code that can produce the test cases. The required information can be extracted from the previously created PLC-HSM model, so the desired Alloy code can be generated automatically. This generation was implemented with Acceleo, a model-to-text transformation tool that is part of the Eclipse Modeling Tools.
The generated Alloy code guarantees state and transition coverage. To create the Alloy code, we need to know the names of the states and transitions, their relationships, the guards, and the initial state of the SUT. From this information the necessary Alloy signatures and predicates are generated.
\item The generated Alloy code can be executed with the Alloy Analyzer to obtain the test suite with all the test cases.
\end{enumerate}
The generated Alloy code will be demonstrated with an example (see Figure~\ref{fig:alloy_statemachine}). The static part of the generated Alloy code can be seen in Listing~\ref{lst:alloy_static}.
\begin{figure}[htp]
\centering
\includegraphics[scale=0.5]{figures/alloy_statemachine}
\caption{Example state machine with guard}
\label{fig:alloy_statemachine}
\end{figure}
The basic state machine elements (system, states, transitions) are defined on lines 4-7. The next section (lines 9-18) describes the structure of a basic test case. One test case consists of several steps. The only given fact (lines 20-27) defines the connection between a test case and the state machine. The predicate \texttt{inheritSystem} is a utility method that can be used to inherit extended state variables from previous states. Predicates \texttt{transition\_coverage} and \texttt{state\_coverage} define the transition and state coverage criteria, respectively. These predicates can be executed using the \texttt{run} statement on line 38.
\begin{lstlisting}[label={lst:alloy_static}, caption=Test suite generator Alloy code,breaklines=true]
module psm_statecoverage
open util/integer
abstract sig System {}
abstract sig State {system: one System}
abstract sig Transition {from, to: one State}
one sig Initial, End extends State {}
sig TestCase { firstStep: Step }
sig Step {
from, to: State,
via: Transition,
nextStep: lone Step
} {
via.from = from
via.to = to
}
fun steps (tc:TestCase): set Step { tc.firstStep.*nextStep }
fact {
all s:Step, tc:TestCase | s in tc.firstStep.*nextStep
all tc:TestCase | tc.firstStep.from = Initial
all t:Transition | one s:Step | s.via = t
all curr:Step, next:curr.nextStep | next.from = curr.to
all sys:System | some s:State | sys = s.system
all s:State | some t:Transition | t.from = s or t.to = s
}
pred inheritSystem(s1, s2: System) { s1 = s2 }
/***** GENERATED CODE START *****/
...
/***** GENERATED CODE END *****/
pred transition_coverage() { some tc:TestCase | steps[tc].via = Transition }
pred state_coverage() { some tc:TestCase | all s:State | s in steps[tc].from + steps[tc].to }
run state_coverage for 10 but exactly 1 TestCase
\end{lstlisting}
The dynamic part of the Alloy code, generated from the instance model, can be seen in Listing~\ref{lst:alloy_dynamic}. The code starts with the initialization of the SUT: its structure is defined in a signature, while the initial state of the SUT is defined in a predicate. The rest of the code describes the other parts of the state machine: the states (\texttt{A}, \texttt{B}), the transitions (\texttt{T0}, \texttt{T1}, \texttt{T2}, \texttt{T3}), the events (\texttt{E0}) and the guards (\texttt{G0}).
\begin{lstlisting}[label={lst:alloy_dynamic}, caption=Dynamically generated Alloy codes,breaklines=true]
sig S extends System { a: Int }
pred initSystem(s:System) { s.a = 0 }
one sig A, B extends State {}
lone sig T0 extends Transition {}{
from = Initial
to = A
initSystem[from.system]
E0[from.system, to.system]
}
lone sig T1 extends Transition {}{
from = A
to = B
inheritSystem[from.system, to.system]
G0[from.system]
}
lone sig T2 extends Transition {}{
from = B
to = End
inheritSystem[from.system, to.system]
}
lone sig T3 extends Transition {}{
from = A
to = B
inheritSystem[from.system, to.system]
}
pred E0(s1, s2: System) { s2.a = add[s1.a, 1] }
pred G0(s: System) { s.a > 1 }
\end{lstlisting}
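The mapping from the instance model to this dynamic part is purely mechanical. The following sketch shows the idea behind the generation step in plain Python; it is only an illustration of the text generation and not the actual Acceleo templates used in the implementation.
\begin{lstlisting}[caption=Illustrative sketch of the model-to-text generation (not the actual Acceleo templates),breaklines=true]
# Emit Alloy fragments like those of the dynamic code above from a
# simple description of the state machine (states, transitions, bodies).
def generate_states(states):
    return "one sig " + ", ".join(states) + " extends State {}"

def generate_transition(name, source, target, body):
    lines = ["lone sig " + name + " extends Transition {}{",
             "  from = " + source,
             "  to = " + target]
    lines += ["  " + line for line in body]
    lines.append("}")
    return "\n".join(lines)

print(generate_states(["A", "B"]))
print(generate_transition("T1", "A", "B",
    ["inheritSystem[from.system, to.system]", "G0[from.system]"]))
\end{lstlisting}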
\begin{figure}[htp]
\centering
\includegraphics[scale=0.5]{figures/alloy_statecoverage}
\caption{Example test case generated from state machine in Figure~\ref{fig:alloy_statemachine}}
\label{fig:alloy_statecoverage}
\end{figure}
The above Alloy code generates test cases with state coverage guaranteed; the resulting test case for the previously defined state machine is shown in Figure~\ref{fig:alloy_statecoverage}. As we can see, transition \texttt{T1}, having an unsatisfiable guard, is left out of the test case, and the generated test case satisfies all the requirements.
% chapter implementation (end)
"alphanum_fraction": 0.7469758065,
"avg_line_length": 50.0168067227,
"ext": "tex",
"hexsha": "90b23a878d99fd6fe571413d3a0381dd3c006df4",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7e2da59405ae1b3ad5d1bc939e44086fa09b9ee1",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "thesnapdragon/msc-thesis",
"max_forks_repo_path": "doc/dipterv1/content/implementation.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7e2da59405ae1b3ad5d1bc939e44086fa09b9ee1",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "thesnapdragon/msc-thesis",
"max_issues_repo_path": "doc/dipterv1/content/implementation.tex",
"max_line_length": 675,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "7e2da59405ae1b3ad5d1bc939e44086fa09b9ee1",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "thesnapdragon/msc-thesis",
"max_stars_repo_path": "doc/dipterv1/content/implementation.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1432,
"size": 5952
} |
\documentclass{article}
\begin{document}
\section{What is RASI?}
RASI, which stands for \textit{Robocracy Autonomous Scripting Interface}, is a scripting language developed by Robocracy engineers to allow for faster programming and debugging times. Its principal use was to allow the developer to write code that could be run without needing to rebuild the entire application and wait, sometimes for minutes, for it to be loaded onto the Android device. RASI has a simple syntax which is easy to understand and use, but it can be extended to do anything that could be done in Java.
\section{RASI Language Design}
RASI consists of three main parts: the interpreter, the lexer, and \texttt{RasiCommands}.
\begin{itemize}
\item \textbf{\texttt{RasiCommands}} is the heart of what makes RASI so useful, as it is where the programmer can write Java functions that can be called from a RASI file and, in doing so, allow RASI to do anything that Java could.
\item \textbf{The RASI Interpreter} takes in commands from the lexer and then actually calls the functions that are in \texttt{RasiCommands}. This is what actually runs the code from your Java program.
\item \textbf{The RASI Lexer} takes in a RASI file and translates it into commands and arguments for the interpreter to execute.
\end{itemize}
\section{RASI Syntax}
RASI is based on calling Java functions via \texttt{commands}, followed by a list of arguments, with an optional TAG.
The syntax for calling a command without a tag is as follows: \texttt{command argument1 argument2}. This calls the \texttt{command} function defined in \texttt{RasiCommands} with arguments \texttt{argument1} and \texttt{argument2}.
Tags are used to control whether a command should execute. If you would like the program to execute commands with a given tag, call \texttt{addTag TAG\_TO\_BE\_ADDED} before you use it. If you do not, any commands prefixed with that tag will not be executed.
\begin{verbatim}
addTag BLUE
BLUE: sayHello Hi
RED: sayGoodbye Bye!
end
\end{verbatim}
In this example, the program first adds the tag \texttt{BLUE}, then calls \texttt{sayHello("Hi")}, skips execution of sayGoodbye, and ends the program.
\section{Supported Types}
RASI currently supports only the basic Java types and strings, so you cannot pass in your MagicUnicorn class as an argument. The fully supported types are:
\begin{enumerate}
\item Integers
\item Doubles
\item Characters
\item Floats
\item Booleans
\item Strings
\end{enumerate}
\end{document}
%
% File coling2020.tex
%
% Contact: [email protected] & [email protected]
%% Based on the style files for COLING-2018, which were, in turn,
%% Based on the style files for COLING-2016, which were, in turn,
%% Based on the style files for COLING-2014, which were, in turn,
%% Based on the style files for ACL-2014, which were, in turn,
%% Based on the style files for ACL-2013, which were, in turn,
%% Based on the style files for ACL-2012, which were, in turn,
%% based on the style files for ACL-2011, which were, in turn,
%% based on the style files for ACL-2010, which were, in turn,
%% based on the style files for ACL-IJCNLP-2009, which were, in turn,
%% based on the style files for EACL-2009 and IJCNLP-2008...
%% Based on the style files for EACL 2006 by
%%[email protected] or [email protected]
%% and that of ACL 08 by Joakim Nivre and Noah Smith
\documentclass[11pt]{article}
\usepackage{coling2020}
\usepackage{times}
\usepackage{url}
\usepackage{latexsym}
\usepackage{graphicx}
\graphicspath{ {./images/} }
%\setlength\titlebox{5cm}
\colingfinalcopy % Uncomment this line for the final submission
% You can expand the titlebox if you need extra space
% to show all the authors. Please do not make the titlebox
% smaller than 5cm (the original size); we will check this
% in the camera-ready version and ask you to change it back.
\title{The importance of content in one's native language}
\author{Zubair Abid \\
20171076\\
IIIT Hyderabad\\
%Affiliation / Address line 3 \\
{\tt [email protected]} }%\\\And
% Second Author \\
% Affiliation / Address line 1 \\
% Affiliation / Address line 2 \\
% Affiliation / Address line 3 \\
% {\tt email@domain} \\}
\date{}
\begin{document}
\maketitle
\begin{abstract}
There is a severe lack of non-English content on the internet -- more
specifically, a lack of the languages that do not typically feature in the
European or South American palette. We investigate why having large amounts
of data in local, native languages is crucial, not only for training NLP
systems, but also for the general social, political, and economic
improvements that come both from embracing native languages and from
ensuring the growth of organic language content on the internet.
\end{abstract}
\section{Introduction}
\label{intro}
All images have been taken from
\url{https://assets.kpmg/content/dam/kpmg/in/pdf/2017/04/Indian-languages-Defining-Indias-Internet.pdf}
A common problem with automated NLP systems deployed on the internet --
especially large-scale ones based on Deep Learning -- is that they tend to fail
when they encounter languages for which large amounts of readily digitized
training data do not exist. This is a common situation for many so-called
"low-resource" languages: the small quantities of easily available digitized
and tagged data make State of the Art (SOTA) performance impossible with the
latest and greatest in Deep Learning, and such languages are thus restricted to
simpler Machine Learning techniques.
Lack of content in native languages (hereon, it is assumed that English is not
a native language for the majority of people; "native language" will therefore
exclude English, especially as it is already the de-facto 'native language of
the web', and its native speakers are not hampered in similar ways) is a
problem that plagues not only developers of Natural Language Processing (NLP)
systems, but one that also -- and, in fact, primarily -- impacts the native
speakers of the language itself. The problem is not so much technological as it
is sociological: the reason these NLP tools are built (or attempted) at all is
to create a social impact through their use by native speakers.
Digging deeper, a big part of why people attempt to create such NLP tools --
translators, summarisers, and so on -- is to improve the language resources
available online for people who speak a language that is not well represented
on the internet, and thereby make content on the World Wide Web accessible to
them. It is therefore ironic that the very problem these tools set out to solve
-- namely, the low representation of "native" languages -- is the reason these
very tools do not work.
But to argue about a problem, one must first show that it exists. It is not
enough to simply \textit{claim} that one's "own" languages are important.
It is necessary to demonstrate to the plain eye that it is so.
\section{Problem description}
The problem in itself is simple enough. Most languages apart from English are
mere second-class citizens on the train of the interwebs. In fact, to be a
second-class citizen is in itself a privilege, one offered primarily to
prominent European and South American languages. The rest are limited to
hobbyist domains at best; perhaps Wikipedia may be kind enough to give the
language its own sub-domain and encyclopaedic homepage. And even that does not
guarantee the language an unfettered space on the internet, as we found out
recently \cite{canales_for_2020}.
This has a severe impact on several things. First, most people in the world do
not speak English -- only 1.27 billion out of an estimated 7.7 billion people
on Earth \cite{ethnologue_english_2019}, and even fewer as a native tongue:
English ranks third with only 379 million native speakers, behind Spanish (480
million) and far behind Mandarin (918 million) \cite{ethnologue_what_2019}.
This means that for a vast majority of the world, a vast majority of the world
wide web is locked off, unless someone in their community makes the effort to
translate much of the information, or to set up similar products with a more
native twist. It is a tragedy of sorts: the world's largest ever Library of
Alexandria at one's fingertips, indecipherable due to linguistic boundaries.
The goal, therefore, is to see why content in one's own language is vitally
important, from multiple aspects - social, political, and economic.
\section{Major Insights}
We may already know -- from intuition, or otherwise -- that we are most
comfortable conversing with one another in our native tongues. While this is
not necessarily the language spoken by one's parents (the peer group one shares
plays just as important a role, if not more), it is undeniable that this
language -- what some might term their "language of thought" -- is the one in
which, given the option, people would prefer to go about their daily routine.
This is one of the key insights we can bring into play for the observations
that we shall tackle later on.
Another key insight comes from the observed role of Google Translate, and
other similar translation APIs, over the past few years. From kludgy,
unworkable, highly inaccurate rule- and statistics-based systems, they have
evolved into something that is still nowhere near any gold standard, but that
can genuinely be considered a temporary stand-in, if nothing else, for a human
translator -- for, after all the haggling and mangling and wearing about, the
API \textit{is} free (or available for use at a nominal charge). The point
here is that more and more we have begun to rely on, and dare I say trust,
automated NLP systems to surpass the language barriers we ourselves do not have
the time to overcome.
\section{Key Observations}
In this section, we will attempt to break down, by the three general categories
of Social, Political, and Economic, the various reasons why native language
content is important. The breakdown covers both the reasons such content is
needed, and the progress that the resulting advances in language processing can
unlock as that content grows, opening up whole new worlds previously thought
impermeable.
\subsection{Social}
\subsubsection{Accessibility}
The first, and probably one of the absolute key factors in the whole equation,
is accessibility. Accessibility is not something well standardised on the
internet; and even where the W3C, for example \cite{initiative_wai_w3c_nodate},
has set up accessibility rules, they are primarily directed at persons with
physical disabilities, rather than at people who might speak different
languages than you. It is a bit ambitious, as one might imagine, to require the
entirety of the internet to provide linguistic accessibility -- a child's toy
website cannot be expected to pay for translations into 176+ languages.
That being said, the primary advantage of native-language websites is that now
anyone who speaks that language can read it. It is limited in its scope - for
the widest reach, one would employ English -- which as we saw earlier, almost
5/7ths of the world does not know, even as a second or third language. That
said, those who do not speak English but do speak Persian, for instance, can
now read the websites that have been made with Persian users in mind. It is not the
entire vast expanse of the multilingual internet, but a significant portion
nonetheless, and an essential step, as we have touched upon earlier but will
discuss in detail in just a bit. Native-language content allows users of that
language to access a seemingly infinite resource they were locked out of before.
And websites can be translated: their reach is not necessarily limited by the
geographical limits of their language, but by the efforts of those bilinguals
willing to put in that much more to grow their community one article
at a time. A regulatory body in India \cite{noauthor_internet_2016} claimed in
2016 that "Internet access" and "local content" are a must for Digital India
success.
\includegraphics[width=\textwidth]{localrel}
Access to the internet in a native language brings with it not only the expanse
of content provided to the user in said native tongue, but also access to the
vast and open nature of the internet in itself. Collaborative encyclopaedias,
open access to the latest state of the art in technological advancements, the
wide world of open source. Enabling native language content on the internet
enriches the lives of both those inducted and those inducting.
A key point that should not be forgotten here is the incredible boost the
vast multitudes of multilingual data will provide to researchers working in
Information Retrieval and Natural Language Processing, enabling them to better
overcome the problem of data sparsity. And for a good while yet, the data will
never be enough -- as seen in recent times, from 340 million parameters in BERT
\cite{devlin_bert_2019}, to the 1.5 billion parameters in GPT-2
\cite{radford_language_2019}, all the way up to 175 billion in GPT-3
\cite{brown_language_2020}, with still no end in sight: Transformers get better
the more data they get.
What that means is that, with more language data and more users of a language,
the tools that improve accessibility from the outside get better, enabling
those users to explore far more of the internet than was previously possible;
this engages other users, who join and generate more content, which in turn
improves the systems further and further, and so on.
But what does that \textit{actually} mean? If you have read the fantastic
Hitchhiker's Guide to the Galaxy, it means we can functionally get to the point
of having a Babel fish. From the lowered, demeaned status of stragglers on the
internet highways, suddenly everybody is upgraded to first-class status, able
to access and read and understand anything written in any language, including
the highly abundant English side of the internet. Not to exploit the Library of
Alexandria metaphor \textit{too} much, but -- I mean -- the modern Library of
Alexandria, at one's fingertips.
(It should go without saying that the vision described here is as far into the
future as Artificial General Intelligence (AGI), and it is incredibly unlikely
that the state of AI as it is today will be able to reach those staggering
heights. Yet, are we not already there to an extent? Consider how often we use
Google Translate's "Translate this page" to read an Arabic news report about our
favourite football team. If such small steps are a reality now, it is not
impossible to imagine a future -- not a perfect one, perhaps, but one that
exists \textit{pretty darn well} for what it is.)
\subsubsection{Education}
And then we come to Education. A contentious topic, one that many have wrapped
their heads around, many with far worthier heads than yours or mine. And yet,
the results remain untold, every new batch of students one untested experiment
after the other. Will discipline hold them in? Perhaps... more medieval methods?
It appears
not, as it is now the twenty-first century, and yet newer batches have been
ushered in for experimentation after those of the last few decades never managed
to catch on to a winning formula.
One of these debates is about the \textit{language of education}. It has
always been so: wherever there is a conflict of language, there will be an
argument over what should be taught at school. The answer may not be as simple
as we make it seem, owing to the plays and tricks of politics, but skipping
over those for the day, we move to the seemingly simple question: what is
better for the child?
You might recall the earlier "Major Insight" where we claimed "undeniably" that
one's native tongue is the language best suited for educating oneself to the
best of one's intellectual capacity. That was not entirely conjured out of thin
air and misdirection. Research has suggested \cite{hudelson_role_1987} that
using one's native tongue at the primary level benefits children's development
in classrooms. Others have also spoken of the benefits of fluency in bilingual
education, rather than eradicating the native tongue in favour of English
\cite{hakuta_compendium_1986} \cite{cummins_bilingualism_1981}.
Students from the UG2k17 batch may in fact remember something else related to
the issue - a series of 2 Theory of Knowledge lectures during induction by
Professor Harjinder Singh (Laltu). In this series of lectures, the focus was on
how humans -- here, specifically Indians -- would benefit from being taught in
their "mother tongue" for the first few years of primary education
\cite{noauthor_knowledge_2020}. This may also seem familiar to those who have
not attended this lecture series: the National Education Policy (NEP) 2020
announced a very similar plan, requiring education in the mother or regional
tongue through the elementary level.
So how does this tie into the need for content in native tongues on the
internet? Education is not limited to the classroom -- the internet is rife
with avenues for learning. Particularly in the form of videos and text,
native-language information is more digestible to a populace more familiar with
its own tongue. Consider the sheer abundance of Hindi video tutorials on
YouTube, whose popularity surpasses even some of the biggest "YouTube
celebrities" and their international, English-language appeal. The content is
still language-locked -- it cannot be consumed outside the boundaries of the
language itself -- but for those who could not access other tutorials and
lectures earlier, now there is a way to learn.
Now let us look at this goldmine of content again through the lens of IR and NLP
-- with more data come better models. With better models come better systems.
With better systems, inter and intra language tasks become more accurate, more
reliable, more generically usable. A Bengali kid can now tap into this system
and watch English MOOC content, without knowing a word of the language. Okay,
some of it is needed, but they're borrowed words anyway so it does not
particularly matter as much. The point is that the information of all of the
internet, in its previously untapped potential, is now available to anyone who
is able to access content in their own native tongue.
\subsubsection{Culture}
One oft-forgotten aspect of the social fabric is culture -- here, more
specifically, how language content available on the internet will enable
cultural conversion and exploration in ways previously unimaginable. To quote
\cite{geser_digicult_2002}, "The conversion of all sorts of cultural contents
into bits and bytes opens up a completely new dimension of reaching traditional
and new audiences by providing access to cultural heritage resources in ways
unimaginable a decade ago". The digital age allows access to tools and methods
previously never thought of; not to port a language and its associated culture
to the interwebs would be quite the disaster.
\subsection{Political}
There are political arguments to be made as well.
English is often considered by many to be the language of the "elite" -- the
landed, caste-privileged upper classes, protégés of the Imperial system of
education in all but name. It is a common slur of sorts, cast at those who
speak English, with hints of Lutyens' Delhi in the attacks.
Even among those not going to such extremes -- it is, after all, the
twenty-first century -- it is a common refrain that the politics of the rich is
inaccessible, precisely because it is only available in one language, English,
a language inaccessible to those not in the upper echelons of a caste-bound
society, and even when available, restricted through accent and fluency to
create an artificial class divide. Such is the claim of English as the "elite
person's language".
To avoid that, information needs to be in native tongues too. Be it the
Communist Manifesto or Atlas Unshrugged, the mass appeal comes from
translations that are far more accessible to the many.
\subsection{Economic}
It just so happens that the same people who use the internet to learn, talk,
and so on will also use that very same internet for online shopping, and for
visiting websites that sell things, even if no money changes hands on the
spot.
\includegraphics[width=\textwidth]{indlang}
It also happens that, by adding a language-specific version of a website, user
checkout and interaction go through the roof. This is, again, not too
surprising -- most areas have some local language along with a more "formal"
one, often English, where the formal one is the rarity and the local one the
language of the masses. E-commerce activity would be expected to be higher in
the local versions of these websites, sometimes across regions large enough
that substantial profits can be made from adding language-native search and
operation options.
\includegraphics[width=\textwidth]{indad}
\section{Conclusions}
The employment of local or native languages across the internet is still, to
date, like most of the internet, voluntary, albeit sometimes funded by State
governments in order to promote said language. That said, it is a goal worth
pursuing, not only for the benefit of Deep Learning systems requiring more
data, but also for humans.
% include your own bib file like this:
% \bibliographystyle{coling}
% \bibliography{coling2020}
\bibliographystyle{coling}
\bibliography{ireterm}
\end{document}
\documentclass[]{article}
\usepackage{lmodern}
\usepackage{authblk}
\usepackage{amssymb,amsmath}
\usepackage{ifxetex,ifluatex}
\usepackage{fixltx2e} % provides \textsubscript
\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\else % if luatex or xelatex
\ifxetex
\usepackage{mathspec}
\else
\usepackage{fontspec}
\fi
\defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase}
\fi
% use upquote if available, for straight quotes in verbatim environments
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
% use microtype if available
\IfFileExists{microtype.sty}{%
\usepackage{microtype}
\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
}{}
\usepackage[margin=1in]{geometry}
\usepackage{hyperref}
\hypersetup{unicode=true,
pdftitle={corner.py: Scatterplot matrices in Python},
pdfborder={0 0 0},
breaklinks=true}
\urlstyle{same} % don't use monospace font for urls
\usepackage{graphicx,grffile}
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi}
\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi}
\makeatother
% Scale images if necessary, so that they will not overflow the page
% margins by default, and it is still possible to overwrite the defaults
% using explicit options in \includegraphics[width, height, ...]{}
\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
\IfFileExists{parskip.sty}{%
\usepackage{parskip}
}{% else
\setlength{\parindent}{0pt}
\setlength{\parskip}{6pt plus 2pt minus 1pt}
}
\setlength{\emergencystretch}{3em} % prevent overfull lines
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{0}
% Redefines (sub)paragraphs to behave more like sections
\ifx\paragraph\undefined\else
\let\oldparagraph\paragraph
\renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}}
\fi
\ifx\subparagraph\undefined\else
\let\oldsubparagraph\subparagraph
\renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}}
\fi
\title{corner.py: Scatterplot matrices in Python}
\author{Daniel Foreman-Mackey}
\affil{Sagan Fellow, University of Washington}
\date{26 May 2016}
\begin{document}
\maketitle
\textbf{Paper DOI:} \url{http://dx.doi.org/10.21105/joss.00024}\\
\textbf{Software Repository:} \url{https://github.com/dfm/corner.py}\\
\textbf{Software Archive:} \url{http://dx.doi.org/10.5281/zenodo.53155}\\
\section{Summary}\label{summary}
This Python module uses matplotlib (Hunter 2007) to visualize
multidimensional samples using a scatterplot matrix. In these
visualizations, each one- and two-dimensional projection of the sample
is plotted to reveal covariances. \emph{corner} was originally conceived
to display the results of Markov Chain Monte Carlo simulations and the
defaults are chosen with this application in mind but it can be used for
displaying many qualitatively different samples.
Development of \emph{corner} happens on GitHub and any issues can be
raised there (Foreman-Mackey 2016). \emph{corner} has been used
extensively in the astronomical literature and it has occasionally been
cited as \texttt{corner.py} or using its previous name
\texttt{triangle.py}. The source code for \emph{corner} has been
archived to Zenodo, and it has the DOI listed in the references (Zenodo Archive 2016).
The following is a simple demonstration of a visualization made with
\emph{corner}:
\includegraphics{corner.png}
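For readers who want to produce something similar, a minimal usage sketch is
shown below. The snippet is not taken from the paper; it only assumes the
module's public \texttt{corner.corner} entry point and uses synthetic Gaussian
samples in place of real MCMC output.
\begin{verbatim}
import numpy as np
import corner

# Synthetic stand-in for MCMC output: 10000 samples in 3 dimensions.
ndim, nsamples = 3, 10000
samples = np.random.randn(nsamples, ndim)

# Plot every one- and two-dimensional projection of the samples.
figure = corner.corner(samples, labels=["x", "y", "z"])
figure.savefig("corner-demo.png")
\end{verbatim}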
\section*{References}\label{references}
\addcontentsline{toc}{section}{References}
\hypertarget{refs}{}
\hypertarget{ref-corner_github}{}
Foreman-Mackey, Daniel. 2016. ``Corner.py on Github.''
\url{https://github.com/dfm/corner.py}.
\hypertarget{ref-matplotlib}{}
Hunter, John D. 2007. ``Matplotlib: A 2D Graphics Environment.''
\emph{Computing in Science and Engineering} 9 (3): 90--95.
doi:\href{https://doi.org/10.1109/MCSE.2007.55}{10.1109/MCSE.2007.55}.
\hypertarget{ref-corner_archive}{}
Zenodo Archive. 2016. ``Corner.py: Scatterplot Matrices in Python.''
\url{http://dx.doi.org/10.5281/zenodo.53155}.
doi:\href{https://doi.org/10.5281/zenodo.53155}{10.5281/zenodo.53155}.
\end{document}
%----------------------------------------------------------------------------------------
% PACKAGES AND OTHER DOCUMENT CONFIGURATIONS
%----------------------------------------------------------------------------------------
\documentclass[paper=a4, fontsize=11pt]{scrartcl} % A4 paper and 11pt font size
\usepackage[T1]{fontenc} % Use 8-bit encoding that has 256 glyphs
\usepackage{fourier} % Use the Adobe Utopia font for the document - comment this line to return to the LaTeX default
\usepackage[english]{babel} % English language/hyphenation
\usepackage{amsmath,amsfonts,amsthm,amssymb} % Math packages
\usepackage{mathrsfs}
\usepackage{algorithm, algorithmic}
\renewcommand{\algorithmicrequire}{\textbf{Input:}} %Use Input in the format of Algorithm
\renewcommand{\algorithmicensure}{\textbf{Output:}} %UseOutput in the format of Algorithm
\usepackage{listings}
\lstset{language=Matlab}
\usepackage{lipsum} % Used for inserting dummy 'Lorem ipsum' text into the template
\usepackage{sectsty} % Allows customizing section commands
\allsectionsfont{\centering \normalfont\scshape} % Make all sections centered, the default font and small caps
\usepackage{fancyhdr} % Custom headers and footers
\pagestyle{fancyplain} % Makes all pages in the document conform to the custom headers and footers
\fancyhead{} % No page header - if you want one, create it in the same way as the footers below
\fancyfoot[L]{} % Empty left footer
\fancyfoot[C]{} % Empty center footer
\fancyfoot[R]{\thepage} % Page numbering for right footer
\renewcommand{\headrulewidth}{0pt} % Remove header underlines
\renewcommand{\footrulewidth}{0pt} % Remove footer underlines
\setlength{\headheight}{13.6pt} % Customize the height of the header
\numberwithin{equation}{section} % Number equations within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4)
\numberwithin{figure}{section} % Number figures within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4)
\numberwithin{table}{section} % Number tables within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4)
\setlength\parindent{0pt} % Removes all indentation from paragraphs - comment this line for an assignment with lots of text
%----------------------------------------------------------------------------------------
% TITLE SECTION
%----------------------------------------------------------------------------------------
\newcommand{\horrule}[1]{\rule{\linewidth}{#1}} % Create horizontal rule command with 1 argument of height
\title{
\normalfont \normalsize
\textsc{Shanghai Jiao Tong University, UM-SJTU JOINT INSTITUTE} \\ [25pt] % Your university, school and/or department name(s)
\horrule{0.5pt} \\[0.4cm] % Thin top horizontal rule
\huge Introduction to Numerical Analysis \\ HW5 \\ % The assignment title
\horrule{2pt} \\[0.5cm] % Thick bottom horizontal rule
}
\author{Yu Cang \\ 018370210001} % Your name
\date{\normalsize \today} % Today's date or a custom date
\begin{document}
\maketitle % Print the title
%----------------------------------------------------------------------------------------
% PROBLEM 1
%----------------------------------------------------------------------------------------
\section{Lebesgue constant for Chebyshev nodes}
\begin{enumerate}
\item
\begin{enumerate}
\item
\begin{proof}
Denote
\begin{equation}
\begin{aligned}
LHS &\triangleq (x-x_i)l_i(x) \\
RHS &\triangleq \frac{T_{n+1}(x)}{T_{n+1}'(x_i)}
\end{aligned}
\end{equation}
It is left to prove $LHS = RHS$.\\
The left part can be written as
\begin{equation}
LHS = c_l \omega(x)
\end{equation}
where
\begin{equation}
\omega(x) = \prod_{i=0}^{n}(x-x_i)
\end{equation}
and
\begin{equation}
c_l = \frac{1}{\prod_{k=0, k\neq i}^{n}(x_i - x_k)}
\end{equation}
Since both $LHS$ and $RHS$ are polynomials of order $n+1$, they are equivalent iff they have the same roots and the same leading coefficients.\\
On the one hand, as for all $x_i$, where $i = 0, 1, ... , n$
\begin{equation}
T_{n+1}(x_i) = cos((n+1)\theta_i) = cos(\frac{2i+1}{2}\pi) = 0
\end{equation}
Thus, $LHS$ and $RHS$ have same roots. $RHS$ can therefore be written as
\begin{equation}
RHS(x) = c_r \omega(x)
\end{equation}
On the other hand, since
\begin{equation}
LHS'(x)|_{x=x_i} = (l_i(x) + (x-x_i)l_i'(x))|_{x=x_i} = 1
\end{equation}
and
\begin{equation}
RHS'(x)|_{x=x_i} = \frac{T'_{n+1}(x)}{T_{n+1}'(x_i)}\Big|_{x=x_i} = 1
\end{equation}
Thus, the leading coefficients of $LHS$ and $RHS$ are equal, namely $c_l = c_r$.\\
Hence, $LHS=RHS$.
\end{proof}
\item
\begin{proof}
\begin{equation}
\begin{aligned}
T'_{n+1}(x) & = (cos((n+1)arccos(x)))'\\
& = sin((n+1)arccos(x)) (n+1) \frac{1}{\sqrt{1-x^2}}\\
& = \frac{n+1}{\sqrt{1-cos^2(\theta)}} sin((n+1)\theta)
\end{aligned}
\end{equation}
As $\theta_k = \frac{2k+1}{2(n+1)}\pi$, thus, $sin(\theta_k) > 0$, and
\begin{equation}
T'_{n+1}(x_k) = \frac{n+1}{sin(\theta_k)} sin(\frac{2k+1}{2}\pi) = {(-1)}^k \frac{n+1}{sin(\theta_k)}
\end{equation}
\end{proof}
\item
\begin{proof}
As
\begin{equation}
T_{n+1}(1) = cos((n+1)arccos(1)) = 1
\end{equation}
Thus
\begin{equation}
\begin{aligned}
\sum_{i=0}^{n}|l_i(1)| & = \sum_{i=0}^{n}\Bigg|\frac{T_{n+1}(1)}{(1-x_i)T'_{n+1}(x_i)}\Bigg| \\
& = \sum_{i=0}^{n} \frac{1}{\Big|(1-x_i)T'_{n+1}(x_i)\Big|}\\
& = \frac{1}{n+1} \sum_{i=0}^{n} \bigg|\frac{sin\theta_i}{(1-cos\theta_i)} \bigg|\\
& = \frac{1}{n+1} \sum_{i=0}^{n} \bigg|\frac{sin\theta_i}{2sin^2(\frac{\theta_i}{2})} \bigg|\\
& \geq \frac{1}{n+1} \sum_{i=0}^{n} cot(\frac{\theta_i}{2})\\
\end{aligned}
\end{equation}
\end{proof}
\end{enumerate}
\item
\begin{enumerate}
\item
\begin{proof}
According to the mean value theorem, there exists $\theta \in [\frac{\theta_k}{2}, \frac{\theta_{k+1}}{2}]$, s.t.
\begin{equation}
\int_{\frac{\theta_k}{2}}^{\frac{\theta_{k+1}}{2}} cot(t) dt = \frac{\theta_{k+1} - \theta_k}{2} cot(\theta)
\end{equation}
As $cot'(t) = -\frac{1}{sin^2(t)} < 0$, and $\theta_k \leq \theta \leq \theta_{k+1}$, thus
\begin{equation}
cot(\theta) \leq cot(\theta_k)
\end{equation}
Therefore
\begin{equation}
\int_{\frac{\theta_k}{2}}^{\frac{\theta_{k+1}}{2}} cot(t) dt \leq \frac{\theta_{k+1} - \theta_k}{2} cot(\theta_k)
\end{equation}
\end{proof}
\item
\begin{proof}
As $\theta_{k+1} - \theta_k = \frac{\pi}{n+1}$ and according to the result that have been proved above
\begin{equation}
\begin{aligned}
\sum_{k=0}^{n} \int_{\frac{\theta_k}{2}}^{\frac{\theta_{k+1}}{2}} cot(t) dt
& \leq \sum_{k=0}^{n} \frac{\theta_{k+1} - \theta_k}{2}cot(\frac{\theta_k}{2})\\
& = \frac{\pi}{2(n+1)} \sum_{k=0}^{n} cot(\frac{\theta_k}{2})
\end{aligned}
\end{equation}
\end{proof}
\item
\begin{proof}
	Note that $\frac{\theta_k}{2} < \frac{\pi}{2}$ for every $k = 0, 1, ... , n$ (since $\frac{\theta_n}{2} = \frac{2n+1}{4(n+1)}\pi < \frac{\pi}{2}$), while $\frac{\theta_{n+1}}{2} = \frac{2n+3}{4(n+1)}\pi > \frac{\pi}{2}$. Hence the intervals $[\frac{\theta_k}{2}, \min(\frac{\theta_{k+1}}{2}, \frac{\pi}{2})]$, $k = 0, 1, ... , n$, cover $[\frac{\theta_0}{2}, \frac{\pi}{2}]$ exactly, so
	\begin{equation}
		\int_{\frac{\theta_0}{2}}^{\frac{\pi}{2}} cot(t) dt
		= \sum_{k=0}^{n} \int_{\frac{\theta_k}{2}}^{\min(\frac{\theta_{k+1}}{2}, \frac{\pi}{2})} cot(t) dt
	\end{equation}
	On each of these intervals $cot$ is positive and decreasing, so as in part (a)
	\begin{equation}
		\int_{\frac{\theta_k}{2}}^{\min(\frac{\theta_{k+1}}{2}, \frac{\pi}{2})} cot(t) dt
		\leq \Big(\min(\frac{\theta_{k+1}}{2}, \frac{\pi}{2}) - \frac{\theta_k}{2}\Big) cot(\frac{\theta_k}{2})
		\leq \frac{\pi}{2(n+1)} cot(\frac{\theta_k}{2})
	\end{equation}
	since each interval has length at most $\frac{\theta_{k+1}-\theta_k}{2} = \frac{\pi}{2(n+1)}$ and $cot(\frac{\theta_k}{2}) > 0$. Summing over $k$ gives
	\begin{equation}
		\int_{\frac{\theta_0}{2}}^{\frac{\pi}{2}} cot(t) dt \leq \frac{\pi}{2(n+1)} \sum_{i=0}^{n}cot(\frac{\theta_i}{2})
	\end{equation}
\end{proof}
\end{enumerate}
\item
\begin{proof}
\begin{equation}
\begin{aligned}
\Lambda_n & = \max_{x \in [a, b]} \sum_{i=0}^{n} |l_i(x)|\\
& \geq \sum_{i=0}^{n} |l_i(1)|\\
& \geq \frac{1}{n+1} \sum_{i=0}^{n} cot(\frac{\theta_i}{2})\\
& \geq \frac{2}{\pi} \int_{\theta_0 / 2}^{\pi/2} cot(t)dt\\
& = \frac{2}{\pi} ln(|sin(t)|) \Bigg| _{\theta_0/2}^{\pi/2}\\
& = - \frac{2}{\pi} ln(sin(\frac{\theta_0}{2}))\\
& \geq \frac{2}{\pi} ln(\frac{2}{\theta_0}) = \frac{2}{\pi} ln(\frac{4(n+1)}{\pi})\\
& \ge \frac{2}{\pi} ln(n)
\end{aligned}
\end{equation}
\end{proof}
\end{enumerate}
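As an informal numerical sanity check (not part of the assignment), the Lebesgue constant for Chebyshev nodes can be approximated on a fine grid and compared against the $\frac{2}{\pi} ln(n)$ lower bound obtained above; the following small Python sketch does exactly that.
\begin{lstlisting}[language=Python]
import numpy as np

def lebesgue_constant(n, grid=20001):
    # Chebyshev nodes x_i = cos((2i+1)pi/(2(n+1))), i = 0..n
    i = np.arange(n + 1)
    nodes = np.cos((2 * i + 1) * np.pi / (2 * (n + 1)))
    x = np.linspace(-1.0, 1.0, grid)
    # Lebesgue function: sum over i of |l_i(x)|, the Lagrange basis polynomials
    lam = np.zeros_like(x)
    for k in range(n + 1):
        others = np.delete(nodes, k)
        l_k = np.prod((x[:, None] - others) / (nodes[k] - others), axis=1)
        lam += np.abs(l_k)
    return lam.max()

for n in (4, 8, 16, 32):
    print(n, lebesgue_constant(n), 2 / np.pi * np.log(n))
\end{lstlisting}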
%----------------------------------------------------------------------------------------
% PROBLEM 2
%----------------------------------------------------------------------------------------
\section{Interpolation}
\begin{enumerate}
\item
\begin{proof}
Since $\Phi(f)=0$, $f(x_k) + f(y_k) = 0$ is valid for any $k$.\\
Thus $f(x_k) = f(y_k) = 0$ or $f(x_k)$ and $f(y_k)$ have different signs.\\
In the first case, take $\xi_k = x_k$ or $\xi_k = y_k$;\\
In the second case, as $f$ is continous over $[a, b]$, according to the intermediate value theorem, there exists $\xi_k \in [x_k, y_k]$ s.t. $f(\xi_k) = 0$.
\end{proof}
\item
\begin{proof}
Let $h_1(x) \in \mathbb{R}_n [x]$, $h_2(x) \in \mathbb{R}_n [x]$ and $h_1(x) \neq h_2(x)$. They can be written as
\begin{equation}
h_1(x) = \sum_{i=0}^{n} a_i x^i
\end{equation}
\begin{equation}
h_2(x) = \sum_{i=0}^{n} b_i x^i
\end{equation}
Assume $\Phi(h_1) = \Phi(h_2)$, then
\begin{equation}
h_1(x_k) + h_1(y_k) = h_2(x_k) + h_2(y_k) \ \ (k=0, 1, ... , n)
\end{equation}
Thus
\begin{equation}
\begin{bmatrix}
(1+1) & (x_0 + y_0) & (x_0^2 + y_0^2) & ... & (x_0^n + y_0^n)\\
(1+1) & (x_1 + y_1) & (x_1^2 + y_1^2) & ... & (x_1^n + y_1^n)\\
\vdots & \vdots & \vdots & \ddots & \vdots \\
(1+1) & (x_n + y_n) & (x_n^2 + y_n^2) & ... & (x_n^n + y_n^n)\\
\end{bmatrix}
\begin{bmatrix}
a_0 - b_0\\
a_1 - b_1\\
\vdots \\
a_n - b_n\\
\end{bmatrix}
=
\begin{bmatrix}
0\\
0\\
\vdots \\
0\\
\end{bmatrix}
\end{equation}
	The determinant of the coefficient matrix above is positive, as it is the sum of $2^{n+1}$ determinants, each of them positive since $x_0 < y_0 < x_1 < y_1 < ... < x_n < y_n$. Thus, the only solution to this linear system is $x = [0, 0, ... , 0]^T$, which means
\begin{equation}
a_i = b_i \ \ (i=0, 1, ... , n)
\end{equation}
	So the hypothesis $h_1(x) \neq h_2(x)$ fails, i.e. $\Phi(h_1) = \Phi(h_2)$ implies $h_1 = h_2$, which means $\Phi$ is injective.\\
	Now assume $\Phi(P_1) = \Phi(f)$ and $\Phi(P_2) = \Phi(f)$. Then $\Phi(P_1) = \Phi(P_2)$, so by injectivity $P_1 = P_2$, and the unicity is proved.
\end{proof}
\item
\begin{proof}
Let $P_n(x) = a_n x^n + a_{n-1} x^{n-1} + ... + a_1 x + a_0$, denote $h(x) = P_n(x) - f(x)$. As $\Phi(P_n) = \Phi(f)$, then
\begin{equation}
h(x_k) + h(y_k) = 0
\end{equation}
Thus, there exists $\xi_k \in [x_k, y_k]$ s.t. $h(\xi_k) = 0$. So
\begin{equation}
\sum_{i=0}^{n}a_i \xi_k ^ i = f(\xi_k) \ \ (k=0, 1, ... , n)
\end{equation}
which can be written as matrix form
\begin{equation}
\begin{bmatrix}
1 & \xi_0 & \xi_0^2 & ... & \xi_0^n \\
1 & \xi_1 & \xi_1^2 & ... & \xi_1^n \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
1 & \xi_n & \xi_n^2 & ... & \xi_n^n \\
\end{bmatrix}
\begin{bmatrix}
a_0\\
a_1\\
\vdots \\
a_n\\
\end{bmatrix}
=
\begin{bmatrix}
f(\xi_0)\\
f(\xi_1)\\
\vdots \\
f(\xi_n)\\
\end{bmatrix}
\end{equation}
	The determinant of the coefficient matrix is the so-called Vandermonde determinant, and since $\xi_j > \xi_i$ when $j>i$, this determinant is positive. Hence there exist $n+1$ points $\xi_i$ in $[a, b]$ s.t. $P_n(\xi_i) = f(\xi_i)$, i.e. $P_n$ is the interpolation polynomial of $f$ at these points.\\
	Applying the interpolation error formula (repeated use of Rolle's theorem), for every $x \in [a, b]$ there exists $\xi_x \in [a, b]$ s.t.
	\begin{equation}
		f(x)-P_n(x) = \frac{\prod_{i=0}^{n}(x-\xi_i)}{(n+1)!}f^{(n+1)}(\xi_x)
	\end{equation}
	Since $|x - \xi_i| \leq b-a$ for all $i$,
	\begin{equation}
		\lVert f- P_n \rVert_\infty = \max_{x \in [a, b]} |f(x)-P_n(x)| \leq \frac{(b-a)^{n+1}}{(n+1)!}\sup_{x \in [a, b]}|f^{(n+1)}(x)|
	\end{equation}
\end{proof}
\end{enumerate}
%----------------------------------------------------------------------------------------
% PROBLEM 3
%----------------------------------------------------------------------------------------
\section{Trigonometric polynomials}
\begin{enumerate}
\item
\begin{proof}
It's clear that the statement holds when $k=0, 1$ as $1$ and $cos \theta$ are in $T_0$ and $T_1$ respectively.\\
Suppose the statement is still valid when $k = n-1$, thus, there exists $Q_{n-1}$ s.t.
\begin{equation}
(cos\theta)^{n-1} = Q_{n-1}
\end{equation}
When it comes to $k = n$, with the induction hypothesis,
\begin{equation}
\begin{aligned}
(cos\theta)^n & = Q_{n-1} \times cos\theta\\
& = (\frac{a_0}{\sqrt{2}} + \sum_{k=1}^{n-1}a_k cosk\theta) cos\theta\\
& = \frac{a_0}{\sqrt{2}}cos\theta + \sum_{k=1}^{n-1}a_k (\frac{cos(k+1)\theta + cos(k-1)\theta}{2})\\
& = \frac{b_0}{\sqrt{2}} + \sum_{k=0}^{n} b_k cosk\theta \triangleq Q_n
\end{aligned}
\end{equation}
Thus, $(cos\theta)^n$ is in $T_n$. Hence, for any $0 \leq k \leq n$, $(cos\theta)^k$ is in $T_n$ as $T_k \subseteq T_n$.\\
	(the bijection part has not been worked out yet ...)
\end{proof}
\item
\begin{proof}
As $Q_n(\theta_i) = F(\theta_i)$, the existence of $Q_n \in T_n$ is equivalent to the existence of solution of the linear system given as below
\begin{equation}
\begin{bmatrix}
\frac{1}{\sqrt{2}} & cos(\theta_0) & cos(2\theta_0) & ... & cos(n\theta_0) \\
\frac{1}{\sqrt{2}} & cos(\theta_1) & cos(2\theta_1) & ... & cos(n\theta_1) \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
\frac{1}{\sqrt{2}} & cos(\theta_n) & cos(2\theta_n) & ... & cos(n\theta_n) \\
\end{bmatrix}
\begin{bmatrix}
a_0 \\ a_1 \\ a_2 \\ \vdots \\ a_n\\
\end{bmatrix}
=
\begin{bmatrix}
F(\theta_0) \\ F(\theta_1) \\ F(\theta_2) \\ \vdots \\ F(\theta_n)\\
\end{bmatrix}
\end{equation}
Thus, it's left to prove the determinant of the coefficient matrix is non-zero, which can be denoted as $\det(A)$.\\
As $cos(k\theta)$ can be written as
\begin{equation}
cos(k\theta) = \sum_{n=0}^{k} b_n(cos\theta)^n
\end{equation}
	Thus, $\det(A)$ can be simplified as below, where $C$ is a non-zero factor.
\begin{equation}
\det(A) = C \det
\begin{bmatrix}
1 & cos(\theta_0) & cos^2(\theta_0) & ... & cos^n(\theta_0) \\
1 & cos(\theta_1) & cos^2(\theta_1) & ... & cos^n(\theta_1) \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
1 & cos(\theta_n) & cos^2(\theta_n) & ... & cos^n(\theta_n) \\
\end{bmatrix}
\end{equation}
	With $\theta_i = \frac{2i+1}{2(n+1)}\pi$, the values $cos(\theta_i)$ are pairwise distinct, so the Vandermonde determinant part is non-zero and $\det(A)\neq0$. Thus the existence of $Q_n$ is established.
\end{proof}
\item
\begin{proof}
	This has already been proved above.
\end{proof}
\item
\begin{proof}
\end{proof}
\end{enumerate}
\end{document}
\subsubsection{test subsubsection}
% some comments
This is testing a subsubsection.
%% Copyright (C) 2008-2011, Gostai S.A.S.
%%
%% This software is provided "as is" without warranty of any kind,
%% either expressed or implied, including but not limited to the
%% implied warranties of fitness for a particular purpose.
%%
%% See the LICENSE file for more information.
\section{List}
\lstinline|List|s implement possibly-empty ordered (heterogeneous)
collections of objects.
\subsection{Prototypes}
\begin{refObjects}
\item[Container]
\item[RangeIterable]~\\
Therefore lists also support \refSlot[RangeIterable]{all} and
\refSlot[RangeIterable]{any}.
\begin{urbiassert}
// Are all elements positive?
! [-2, 0, 2, 4].all(function (e) { 0 < e });
// Are all elements even?
[-2, 0, 2, 4].all(function (e) { e % 2 == 0 });
// Is there any even element?
! [-3, 1, -1].any(function (e) { e % 2 == 0 });
// Is there any positive element?
[-3, 1, -1].any(function (e) { 0 < e });
\end{urbiassert}
\item[Orderable]
\end{refObjects}
\subsection{Construction}
Lists can be created with their literal syntax: a possibly empty sequence of
expressions in square brackets, separated by commas. Non-empty lists may
end with a comma (\autoref{sec:lang:list}).
\begin{urbiscript}
[]; // The empty list
[00000000] []
[1, "2", [3,],];
[00000000] [1, "2", [3]]
\end{urbiscript}
However, \lstinline|new| can be used as expected.
\begin{urbiscript}
List.new();
[00000001] []
[1, 2, 3].new();
[00000002] [1, 2, 3]
\end{urbiscript}
\subsection{Slots}
\begin{urbiscriptapi}
\item['*'](<n>)%
Return the target, concatenated \var{n} times to itself.
\begin{urbiassert}
[0, 1] * 0 == [];
[0, 1] * 3 == [0, 1, 0, 1, 0, 1];
\end{urbiassert}
\var{n} must be a non-negative integer.
\begin{urbiscript}
[0, 1] * -2;
[00000063:error] !!! *: argument 1: expected non-negative integer: -2
\end{urbiscript}
Note that since it is the very same list which is repeatedly
concatenated (the content is not cloned), side-effects on one item
will reflect on ``all the items''.
\begin{urbiscript}
var l = [[]] * 3;
[00000000] [[], [], []]
l[0] << 1;
[00000000] [1]
l;
[00000000] [[1], [1], [1]]
\end{urbiscript}
\begin{urbicomment}
removeSlots("l");
\end{urbicomment}
\item['+'](<other>)%
Return the concatenation of the target and the \var{other} list.
\begin{urbiassert}
[0, 1] + [2, 3] == [0, 1, 2, 3];
[] + [2, 3] == [2, 3];
[0, 1] + [] == [0, 1];
[] + [] == [];
\end{urbiassert}
The target is left unmodified (contrary to \refSlot{'+='}).
\begin{urbiassert}
var l = [1, 2, 3];
l + l == [1, 2, 3, 1, 2, 3];
l == [1, 2, 3];
\end{urbiassert}
\item['+='](<that>)%
Concatenate the contents of the List \var{that} to \this, and return
\this. This function modifies its target, contrary to \refSlot{'+'}. See
also \refSlot{'<<'}.
\begin{urbiassert}
var l = [];
var alias = l;
(l += [1, 2]) == l;
l == [1, 2];
(l += [3, 4]) == l;
l == [1, 2, 3, 4];
alias == [1, 2, 3, 4];
\end{urbiassert}
\item['-'](<other>)%
Return the target without the elements that are equal to any element
in the \var{other} list.
\begin{urbiassert}
[0, 1, 0, 2, 3] - [1, 2] == [0, 0, 3];
[0, 1, 0, 1, 0] - [1, 2] == [0, 0, 0];
\end{urbiassert}
\item['<'](<that>)%
Whether \this is less than the \that List. This is the lexicographic
comparison: \this is ``less than'' \that if, from left to right, one of
its member is ``less than'' the corresponding member of \that:
\begin{urbiassert}
[0, 0, 0] < [0, 0, 1]; !([0, 0, 1] < [0, 0, 0]);
[0, 1, 2] < [0, 2, 1]; !([0, 2, 1] < [0, 1, 2]);
[1, 1, 1] < [2]; !([2] < [1, 1, 1]);
!([0, 1, 2] < [0, 1, 2]);
\end{urbiassert}
\noindent
or \that is a prefix (strict) of \this:
\begin{urbiassert}
[] < [0]; !( [0] < []);
[0, 1] < [0, 1, 2]; !([0, 1, 2] < [0, 1]);
!([0, 1, 2] < [0, 1, 2]);
\end{urbiassert}
Since List derives from \refObject{Orderable}, the other order-based
operators are defined.
\begin{urbiassert}
[] <= [];
[] <= [0, 1, 2];
[0, 1, 2] <= [0, 1, 2];
[] >= [];
[0, 1, 2] >= [];
[0, 1, 2] >= [0, 1, 2];
[0, 1, 2] >= [0, 0, 2];
!([] > []);
[0, 1, 2] > [];
!([0, 1, 2] > [0, 1, 2]);
[0, 1, 2] > [0, 0, 2];
\end{urbiassert}
\item['<<'](<that>)%
A synonym for \lstinline|insertBack|.
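  For instance (a minimal sketch mirroring the \refSlot{insertBack} example
  below, since the two are synonymous):
\begin{urbiassert}
var l = [0, 1];
(l << 2) === l;
l == [0, 1, 2];
\end{urbiassert}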
\item['=='](<that>)%
  Check whether all elements in the target and \var{that} are
  equal two by two.
\begin{urbiassert}
[0, 1, 2] == [0, 1, 2];
!([0, 1, 2] == [0, 0, 2]);
\end{urbiassert}
\item|'[]'|(<n>)%
Return the \var{n}th member of the target (indexing is
zero-based). If \var{n} is negative, start from the end. An error
if out of bounds.
\begin{urbiscript}
assert
{
["0", "1", "2"][0] == "0";
["0", "1", "2"][2] == "2";
};
["0", "1", "2"][3];
[00007061:error] !!! []: invalid index: 3
assert
{
["0", "1", "2"][-1] == "2";
["0", "1", "2"][-3] == "0";
};
["0", "1", "2"][-4];
[00007061:error] !!! []: invalid index: -4
\end{urbiscript}
\item|'[]='|(<index>, <value>)%
Assign \var{value} to the element of the target at the given
\var{index}.
\begin{urbiscript}
var f = [0, 1, 2]|;
assert
{
(f[1] = 42) == 42;
f == [0, 42, 2];
};
for (var i: [0, 1, 2])
f[i] = 10 * f[i];
assert (f == [0, 420, 20]);
\end{urbiscript}
\item[asTree]%
Display the content of the List as a tree representation.
\begin{urbiscript}
echo("simple list:" + ["a", "b", ["d", "e", "f", "g"]].asTree());
[:][00000004] *** simple list:
[:][
[:] a,
[:] b,
[:] [
[:] d,
[:] e,
[:] f,
[:] g,
[:] ]
[:]]
echo("list with dictionary:" +
["a", "b", ["c" => ["d", "e"], "f" => "g"]].asTree());
[:][00000005] *** list with dictionary:
[:][
[:] a,
[:] b,
[:] [
[:] c =>
[:] [
[:] d,
[:] e,
[:] ]
[:] f => g,
[:] ]
[:]]
\end{urbiscript}
\item[append](<that>)%
Deprecated alias for \refSlot{'+='}.
\begin{urbiscript}
var one = [1]|;
one.append(["one", [1]]);
[00000005:warning] !!! `list.append(that)' is deprecated, use `list += that'
[00000005] [1, "one", [1]]
\end{urbiscript}
\item \labelSlot{argMax}\lstinline|(\var{fun} = function(a, b) { a < b })|\\%
The index of the (leftmost) ``largest'' member based on the comparison
function \var{fun}.
\begin{urbiassert}
[1].argMax() == 0;
[1, 2].argMax() == 1;
[1, 2, 2].argMax() == 1;
[2, 1].argMax() == 0;
[2, -1, 3, -4].argMax() == 2;
[2, -1, 3, -4].argMax (function (a, b) { a.abs() < b.abs() }) == 3;
\end{urbiassert}
The list cannot be empty.
\begin{urbiscript}
[].argMax();
[00000007:error] !!! argMax: list cannot be empty
\end{urbiscript}
\item \labelSlot{argMin}\lstinline|(\var{fun} = function(a, b) { a < b })|\\%
The index of the (leftmost) ``smallest'' member based on the comparison
function \var{fun}.
\begin{urbiassert}
[1].argMin() == 0;
[1, 2].argMin() == 0;
[1, 2, 1].argMin() == 0;
[2, 1].argMin() == 1;
[2, -1, 3, -4].argMin() == 3;
[2, -1, 3, -4].argMin (function (a, b) { a.abs() < b.abs() }) == 1;
\end{urbiassert}
The list cannot be empty.
\begin{urbiscript}
[].argMin();
[00000011:error] !!! argMin: list cannot be empty
\end{urbiscript}
\item[asBool]
Whether not empty.
\begin{urbiassert}
[].asBool() == false;
[1].asBool() == true;
\end{urbiassert}
\item[asList]
Return the target.
\begin{urbiassert}
var l = [0, 1, 2];
l.asList() === l;
\end{urbiassert}
\item[asString]
A string describing the list. Uses \lstinline|asPrintable| on its
members, so that, for instance, strings are displayed with quotes.
\begin{urbiassert}
[0, [1], "2"].asString() == "[0, [1], \"2\"]";
\end{urbiassert}
\item[back]
The last element of the target. An error if the target is empty.
\begin{urbiscript}
assert([0, 1, 2].back() == 2);
[].back();
[00000017:error] !!! back: cannot be applied onto empty list
\end{urbiscript}
\item[clear]
Empty the target, return it.
\begin{urbiassert}
var l = [0, 1, 2];
l.clear() === l == [];
l.clear() === l == [];
\end{urbiassert}
\item[each](<fun>)%
Apply the given functional value \var{fun} on all members, sequentially.
\begin{urbiscript}
[0, 1, 2].each(function (v) {echo (v * v); echo (v * v)});
[00000000] *** 0
[00000000] *** 0
[00000000] *** 1
[00000000] *** 1
[00000000] *** 4
[00000000] *** 4
\end{urbiscript}
\item['each&'](<fun>)%
Apply the given functional value on all members simultaneously.
\begin{urbiscript}
[0, 1, 2].'each&'(function (v) {echo (v * v); echo (v * v)});
[00000000] *** 0
[00000000] *** 1
[00000000] *** 4
[00000000] *** 0
[00000000] *** 1
[00000000] *** 4
\end{urbiscript}
\item[eachi](<fun>)%
Apply the given functional value \var{fun} on all members
sequentially, additionally passing the current element index.
\begin{urbiscript}
["a", "b", "c"].eachi(function (v, i) {echo ("%s: %s" % [i, v])});
[00000000] *** 0: a
[00000000] *** 1: b
[00000000] *** 2: c
\end{urbiscript}
\item[empty]
Whether the target is empty.
\begin{urbiassert}
[].empty;
! [1].empty;
\end{urbiassert}
\item[filter](<fun>)%
The list of all the members of the target that verify the predicate
\var{fun}.
\begin{urbiscript}
do ([0, 1, 2, 3, 4, 5])
{
assert
{
// Keep only odd numbers.
filter(function (v) {v % 2 == 1}) == [1, 3, 5];
// Keep all.
filter(function (v) { true }) == this;
// Keep none.
filter(function (v) { false }) == [];
};
}|;
\end{urbiscript}
\item[foldl](<action>, <value>)%
\wref[Fold_(higher-order_function)]{Fold},
also known as \dfn{reduce} or \dfn{accumulate}, computes a result
from a list. Starting from \var{value} as the initial result, apply
repeatedly the binary \var{action} to the current result and the
next member of the list, from left to right. For instance, if
\var{action} were the binary addition and \var{value} were 0, then
folding a list would compute the sum of the list, including for
empty lists.
\begin{urbiassert}
[].foldl(function (a, b) { a + b }, 0) == 0;
[1, 2, 3].foldl(function (a, b) { a + b }, 0) == 6;
[1, 2, 3].foldl(function (a, b) { a - b }, 0) == -6;
\end{urbiassert}
\item[front]
Return the first element of the target. An error if the target is
empty.
\begin{urbiscript}
assert([0, 1, 2].front() == 0);
[].front();
[00000000:error] !!! front: cannot be applied onto empty list
\end{urbiscript}
\item[has](<that>)%
Whether \that equals one of the members.
\begin{urbiassert}
[0, 1, 2].has(1);
! [0, 1, 2].has(5);
\end{urbiassert}
The infix operators \lstinline|in| and \lstinline|not in| use
\lstinline|has| (see \autoref{sec:lang:op:containers}).
\begin{urbiassert}
1 in [0, 1];
2 not in [0, 1];
!(2 in [0, 1]);
!(1 not in [0, 1]);
\end{urbiassert}
\item[hash] A \refObject{Hash} object corresponding to this list
value. Equal lists (in the sense of \refSlot{'=='}) have equal hashes, see
\refSlot[Object]{hash}.
\begin{urbiassert}
[].hash().isA(Hash);
[].hash() == [].hash();
[1, "foo"].hash() == [1, "foo"].hash();
[0, 1].hash() != [1, 0].hash();
\end{urbiassert}
\item[hasSame](<that>)%
Whether \that is is physically equal to one of the members.
\begin{urbiassert}
var x = 1;
[0, x, 2].hasSame(x);
![0, x, 2].hasSame(1);
\end{urbiassert}
\item[head]
Synonym for \refSlot{front}.
\begin{urbiscript}
assert([0, 1, 2].head() == 0);
[].head();
[00000000:error] !!! head: cannot be applied onto empty list
\end{urbiscript}
\item[insert](<where>, <what>)%
Insert \var{what} before the value at index \var{where}, return
\this.
\begin{urbiassert}
var l = [0, 1];
l.insert(0, 10) === l == [10, 0, 1];
l.insert(2, 20) === l == [10, 0, 20, 1];
\end{urbiassert}
The index must be valid, to insert past the end, use \refSlot{insertBack}.
\begin{urbiscript}
[].insert(0, "foo");
[00044239:error] !!! insert: invalid index: 0
[1, 2, 3].insert(4, 30);
[00044339:error] !!! insert: invalid index: 4
\end{urbiscript}
\item[insertBack](<that>)%
Insert \that at the end of the target, return \this.
\begin{urbiassert}
var l = [0, 1];
l.insertBack(2) === l;
l == [0, 1, 2];
\end{urbiassert}
\item[insertFront](<that>)%
Insert the given element at the beginning of the target. Return \this.
\begin{urbiassert}
var l = [0, 1];
l.insertFront(0) === l;
l == [0, 0, 1];
\end{urbiassert}
\item[insertUnique](<that>)%
If \that is not in \this, append it. Return \this.
\begin{urbiassert}
var l = [0, 1];
l.insertUnique(0) === l == [0, 1];
l.insertUnique(2) === l == [0, 1, 2];
\end{urbiassert}
\item[join](<sep> = "", <prefix> = "", <suffix> = "")%
Bounce to \refSlot[String]{join}.
\begin{urbiassert}
["", "ob", ""].join() == "ob";
["", "ob", ""].join("a") == "aoba";
["", "ob", ""].join("a", "B", "b") == "Baobab";
\end{urbiassert}
\item[keys]()%
The list of valid indexes.
\begin{urbiassert}
[].keys == [];
["a", "b", "c"].keys == [0, 1, 2];
\end{urbiassert}
This allows uniform iteration over a \refObject{Dictionary} or a
\refObject{List}.
\begin{urbiscript}
var l = ["a", "b", "c"]|;
var res = []|;
for (var k: l.keys)
res << l[k];
assert (res == l);
\end{urbiscript}
\begin{urbicomment}
removeSlots("l", "res");
\end{urbicomment}
\item[map](<fun>)%
Apply the given functional value on every member, and return the list of
results.
\begin{urbiassert}
[0, 1, 2, 3].map(function (v) { v % 2 == 0})
== [true, false, true, false];
[1, 2, 3].map (function (x) { x*2 })
== [2, 4, 6];
\end{urbiassert}
\item[matchAgainst](<handler>, <pattern>)%
If \var{pattern} is a List of same size, use \var{handler} to match each
member of \this against the corresponding \var{pattern}. Return true if
the match succeeded, false in other cases.
%% FIXME: We had to disable assertion about pattern matching.
\begin{urbiscript}
assert
{
([1, 2] = [1, 2]) == [1, 2];
};
([1, var a] = [1, 2]) == [1, 2];
[00004360] true
assert
{
a == 2;
};
([var u, var v, var w] = [1, 2, 3]) == [1, 2, 3];
[00004376] true
assert
{
[u, v, w] == [1, 2, 3];
};
[1, 2] = [2, 1];
[00005863:error] !!! pattern did not match
[1, var a] = [2, 1];
[00005864:error] !!! pattern did not match
[1, var a] = [1];
[00005865:error] !!! pattern did not match
[1, var a] = [1, 2, 3];
[00005865:error] !!! pattern did not match
\end{urbiscript}
\item \labelSlot{max}\lstinline|(\var{comp} = function(a, b) { a < b })|\\%
Return the ``largest'' member based on the comparison function \var{comp}.
\begin{urbiassert}
[1].max() == 1;
[1, 2].max() == 2;
[2, 1].max() == 2;
[2, -1, 3, -4].max() == 3;
[2, -1, 3, -4].max (function (a, b) { a.abs() < b.abs() }) == -4;
\end{urbiassert}
The list cannot be empty.
\begin{urbiscript}
[].max();
[00000001:error] !!! max: list cannot be empty
\end{urbiscript}
The members must be comparable.
\begin{urbiscript}
[0, 2, "a", 1].max();
[00000002:error] !!! max: argument 2: unexpected "a", expected a Float
\end{urbiscript}
\item \labelSlot{min}\lstinline|(\var{comp} = function(a, b) { a < b })|\\%
Return the ``smallest'' member based on the comparison function \var{comp}.
\begin{urbiassert}
[1].min() == 1;
[1, 2].min() == 1;
[2, 1].min() == 1;
[2, -1, 3, -4].min() == -4;
[2, -1, 3, -4].min (function (a, b) { a.abs() < b.abs() }) == -1;
\end{urbiassert}
The list cannot be empty.
\begin{urbiscript}
[].min();
[00000001:error] !!! min: list cannot be empty
\end{urbiscript}
\item[range](<begin>, <end> = nil)%
Return a sub-range of the list, from the first index included to the
second index excluded. An error if out of bounds. Negative indices
are valid and count from the end.
If \var{end} is \lstinline|nil|, calling \lstinline|range(\var{n})|
is equivalent to calling \lstinline|range(0, \var{n})|.
\begin{urbiscript}
do ([0, 1, 2, 3])
{
assert
{
range(0, 0) == [];
range(0, 1) == [0];
range(1) == [0];
range(1, 3) == [1, 2];
range(-3, -2) == [1];
range(-3, -1) == [1, 2];
range(-3, 0) == [1, 2, 3];
range(-3, 1) == [1, 2, 3, 0];
range(-4, 4) == [0, 1, 2, 3, 0, 1, 2, 3];
};
}|;
[].range(1, 3);
[00428697:error] !!! range: invalid index: 1
\end{urbiscript}
\item[remove](<val>)%
Remove all elements from the target that are equal to \var{val}, return
\this.
\begin{urbiassert}
var c = [0, 1, 0, 2, 0, 3];
c.remove(0) === c == [1, 2, 3];
c.remove(42) === c == [1, 2, 3];
\end{urbiassert}
\item[removeBack]
Remove and return the last element of the target. An error if the
target is empty.
\begin{urbiassert}
var t = [0, 1, 2];
t.removeBack() == 2;
t == [0, 1];
[].removeBack();
[00000000:error] !!! removeBack: cannot be applied onto empty list
\end{urbiassert}
\item[removeById](<that>)%
Remove all elements from the target that are physically equal to
\var{that}.
\begin{urbiassert}
var d = 1;
var e = [0, 1, d, 1, 2];
e.removeById(d) == [0, 1, 1, 2];
e == [0, 1, 1, 2];
\end{urbiassert}
\item[removeFront] Remove and return the first element from the target. An
error if the target is empty.
\begin{urbiassert}
var g = [0, 1, 2];
g.removeFront() == 0;
g == [1, 2];
[].removeFront();
[00000000:error] !!! removeFront: cannot be applied onto empty list
\end{urbiassert}
\item[reverse]
The target with the order of elements inverted.
\begin{urbiassert}
[0, 1, 2].reverse() == [2, 1, 0];
\end{urbiassert}
\item[size]
The number of elements in \this.
\begin{urbiassert}
[0, 1, 2].size == 3;
[].size == 0;
\end{urbiassert}
\item \labelSlot{sort}\lstinline|(\var{comp} = function(a, b) { a < b })|\\%
A new List with the contents of \this, sorted with respect to the
\var{comp} comparison function.
\begin{urbiassert}
var l = [3, 0, -2, 1];
l.sort() == [-2, 0, 1, 3];
l == [3, 0, -2, 1];
l.sort(function(a, b) {a.abs() < b.abs()})
== [0, 1, -2, 3];
\end{urbiassert}
\begin{urbiscript}
[2, 1].sort(1);
[00000001:error] !!! unexpected 1, expected a Executable
\end{urbiscript}
Following the \wref[Garbage_In,_Garbage_Out]{Garbage In, Garbage Out}
principle, if \var{comp} is not a strict weak ordering (e.g., if
\lstinline|comp(\var{a}, \var{b}) && comp(\var{b}, \var{a})| holds for some
\var{a} and \var{b}), the result is meaningless.
\begin{urbiscript}
[1, 2, 3].sort(function(a, b) { true });
[00011293] [2, 3, 1]
\end{urbiscript}
\item[subset](<that>)%
Whether the members of \this are members of \var{that}.
\begin{urbiassert}
[].subset([]);
[].subset([1, 2, 3]);
[3, 2, 1].subset([1, 2, 3]);
[1, 3].subset([1, 2, 3]);
[1, 1].subset([1, 2, 3]);
![3].subset([]);
![3, 2].subset([1, 2]);
![1, 2, 3].subset([1, 2]);
\end{urbiassert}
\item[tail]%
\this minus the first element. An error if the target is empty.
\begin{urbiscript}
assert([0, 1, 2].tail() == [1, 2]);
[].tail();
[00000000:error] !!! tail: cannot be applied onto empty list
\end{urbiscript}
\item[unique]%
A new List containing a single copy (based on \lstinline|==| comparison)
of each member of \this, in no particular order.
\begin{urbiassert}
[].unique() == [];
[1].unique() == [1];
[1, 1].unique() == [1];
[1, 2, 3, 2, 1].unique() == [1, 2, 3];
\end{urbiassert}
\item[zip](<fun>, <other>)%
Zip \this list and the \var{other} list with the \var{fun} function, and
return the list of results.
\begin{urbiassert}
[1, 2, 3].zip(closure (x, y) { (x, y) }, [4, 5, 6])
== [(1, 4), (2, 5), (3, 6)];
[1, 2, 3].zip(closure (x, y) { x + y }, [4, 5, 6])
== [5, 7, 9];
\end{urbiassert}
\end{urbiscriptapi}
%%% Local Variables:
%%% coding: utf-8
%%% mode: latex
%%% TeX-master: "../urbi-sdk"
%%% ispell-dictionary: "american"
%%% ispell-personal-dictionary: "../urbi.dict"
%%% fill-column: 76
%%% End:
| {
"alphanum_fraction": 0.5784607569,
"avg_line_length": 22.6264367816,
"ext": "tex",
"hexsha": "9787d8a3b759eb9dce29c6a6491ea6362a688850",
"lang": "TeX",
"max_forks_count": 15,
"max_forks_repo_forks_event_max_datetime": "2021-09-28T19:26:08.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-01-28T20:27:02.000Z",
"max_forks_repo_head_hexsha": "fb17359b2838cdf8d3c0858abb141e167a9d4bdb",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "jcbaillie/urbi",
"max_forks_repo_path": "doc/specs/list.tex",
"max_issues_count": 7,
"max_issues_repo_head_hexsha": "fb17359b2838cdf8d3c0858abb141e167a9d4bdb",
"max_issues_repo_issues_event_max_datetime": "2019-02-13T10:51:07.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-09-05T10:08:33.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "jcbaillie/urbi",
"max_issues_repo_path": "doc/specs/list.tex",
"max_line_length": 77,
"max_stars_count": 16,
"max_stars_repo_head_hexsha": "fb17359b2838cdf8d3c0858abb141e167a9d4bdb",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "jcbaillie/urbi",
"max_stars_repo_path": "doc/specs/list.tex",
"max_stars_repo_stars_event_max_datetime": "2021-10-05T22:16:13.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-05-10T05:50:58.000Z",
"num_tokens": 7502,
"size": 19685
} |
\section*{\Large{\textbf{Lecture 1A - Course Administrivia}} \small{(March 15, 2021)}}
\label{sec:lecture-1A}
\subsection*{\large{\textbf{1A.1. Basic Information of Lecturers}}}
\label{ssec:lecture-1A1}
\noindent The \emph{professor} responsible for the \emph{lectures} and \emph{labs} is:
\begin{itemize}
\vspace{-0.2cm}
\item Prof. \emph{João \textsc{Lourenço}} - \href{mailto:[email protected]}{\emph{[email protected]}}:
\begin{itemize}
\vspace{-0.2cm}
\item \emph{Office Location}:
\begin{itemize}
\vspace{-0.1cm}
\item \emph{Department of Informatics};
\item Building II - Room P2/9, Ext. 10740;
\end{itemize}
\end{itemize}
\end{itemize}
\subsection*{\large{\textbf{1A.2. Discussion Forums}}}
\label{ssec:lecture-1A2}
\noindent Some \emph{discussion forums} will be available, such as the following ones:
\begin{itemize}
\vspace{-0.2cm}
\item \emph{Piazza}:
\begin{itemize}
\vspace{-0.2cm}
\item \href{piazza.com/fct.unl.pt/spring2021/cp11158/home}{\emph{piazza.com/fct.unl.pt/spring2021/cp11158/home}};
\end{itemize}
\end{itemize}
\subsection*{\large{\textbf{1A.3. Main Bibliography}}}
\label{ssec:lecture-1A3}
\noindent The \emph{main bibliography} is the following:
\begin{itemize}
\item \textbf{Structured Parallel Programming: Patterns for Efficient\\Computation}:
\begin{itemize}
\vspace{-0.2cm}
\item \emph{Michael \textsc{McCool}}, \emph{Arch \textsc{Robison}} and \emph{James \textsc{Reinders}};
\item \emph{Morgan Kaufmann}, 2012;
\item \emph{ISBN}: 978-0-12-415993-8;
\item \href{https://github.com/rubenandrebarreiro/fct-nova-concurrency-and-parallelism-labs/raw/main/books/main-bibliography/structured-parallel-programming-patterns-for-efficient-computation-michael-mccool-arch-robison-and-james-reinders.pdf}{\emph{Click here to download}};
\end{itemize}
\item \textbf{Patterns for Parallel Programming}:
\begin{itemize}
\vspace{-0.2cm}
\item \emph{Tim \textsc{Mattson}}, \emph{Beverly \textsc{Sanders}} and \emph{Berna \textsc{Massingill}};
\item \emph{Addison-Wesley}, 2014;
\item \emph{ISBN}: 0-321-22811-1;
\item \href{https://github.com/rubenandrebarreiro/fct-nova-concurrency-and-parallelism-labs/raw/main/books/main-bibliography/patterns-for-parallel-programming-tim-mattson-beverly-sanders-and-berna-massingill.pdf}{\emph{Click here to download}};
\end{itemize}
\newpage
\item \textbf{Concurrent Programming: Algorithms, Principles, and\\Foundations}:
\begin{itemize}
\vspace{-0.2cm}
\item \emph{Michel \textsc{Raynal}};
\item \emph{Springer-Verlag Berlin Heidelberg}, 2013;
\item \emph{ISBN}: 978-3-642-32026-2;
\item \href{https://github.com/rubenandrebarreiro/fct-nova-concurrency-and-parallelism-labs/raw/main/books/main-bibliography/concurrent-programming-algorithms-principles-and-foundations-michel-raynal.pdf}{\emph{Click here to download}};
\end{itemize}
\end{itemize}
\subsection*{\large{\textbf{1A.4. Additional Bibliography}}}
\label{ssec:lecture-1A4}
\noindent The \emph{additional bibliography} is the following:
\begin{itemize}
\item \textbf{Programming Concurrency on the JVM: Mastering\\Synchronization, STM, and Actors}:
\begin{itemize}
\vspace{-0.2cm}
\item \emph{Venkat \textsc{Subramaniam}};
\item \emph{Pragmatic Bookshelf}, 2011;
\item \emph{ISBN}: 978-1-934356-76-0;
\item \href{https://github.com/rubenandrebarreiro/fct-nova-concurrency-and-parallelism-labs/raw/main/books/additional-bibliography/programming-concurrency-on-the-jvm-mastering-synchronization-stm-and-actors-venkat-subramaniam.pdf}{\emph{Click here to download}};
\end{itemize}
\item \textbf{The Art of Multiprocessor Programming}:
\begin{itemize}
\vspace{-0.2cm}
\item \emph{Maurice \textsc{Herlihy}}, \emph{Nir \textsc{Shavit}}, \emph{Victor \textsc{Luchangco}} and \emph{Michael \textsc{Spear}};
\item \emph{Morgan Kaufmann}, 2021;
\item \emph{ISBN}: 978-0-12-415950-1;
\item \href{https://github.com/rubenandrebarreiro/fct-nova-concurrency-and-parallelism-labs/raw/main/books/additional-bibliography/the-art-of-multiprocessor-programming-maurice-herlihy-nir-shavit-victor-luchangco-and-michael-spear.pdf}{\emph{Click here to download}};
\end{itemize}
\item \textbf{Shared-Memory Synchronization}:
\begin{itemize}
\vspace{-0.2cm}
\item \emph{Michael \textsc{Scott}};
\item \emph{Morgan \& Claypool}, 2013;
\item \emph{ISBN}: 978-1-608-45956-8;
\item \href{https://github.com/rubenandrebarreiro/fct-nova-concurrency-and-parallelism-labs/raw/main/books/additional-bibliography/shared-memory-synchronization-michael-scott.pdf}{\emph{Click here to download}};
\end{itemize}
\newpage
\item \textbf{Principles of Concurrent and Distributed Programming}:
\begin{itemize}
\vspace{-0.2cm}
\item \emph{Mordechai \textsc{Ben-Ari}};
\item \emph{Pearson}, 2006;
\item \emph{ISBN}: 978-0-321-31283-9;
\item \href{https://github.com/rubenandrebarreiro/fct-nova-concurrency-and-parallelism-labs/raw/main/books/additional-bibliography/principles-of-concurrent-and-distributed-programming-algorithms-and-models-mordechai-ben-ari.pdf}{\emph{Click here to download}};
\end{itemize}
\end{itemize}
\subsection*{\large{\textbf{1A.5. Other Bibliography}}}
\label{ssec:lecture-1A5}
\noindent Other \emph{recommended} \emph{bibliography} is the following:
\begin{itemize}
\item \textbf{Pro Git}:
\begin{itemize}
\vspace{-0.2cm}
\item \emph{Scott \textsc{Chacon}} and \emph{Ben \textsc{Straub}};
\item \emph{Apress}, 2014;
\item \emph{ISBN}: 978-1-4842-0076-6;
\item \href{https://github.com/rubenandrebarreiro/fct-nova-concurrency-and-parallelism-labs/raw/main/books/others/pro-git-scott-chacon-and-ben-straub.pdf}{\emph{Click here to download}};
\end{itemize}
\end{itemize}
\subsection*{\large{\textbf{1A.6. Syllabus}}}
\label{ssec:lecture-1A6}
\noindent The structure of the \emph{course} is described by the following enumerated topics:
\begin{enumerate}
\item \textbf{Parallel Architectures}:
\begin{itemize}
\vspace{-0.2cm}
\item \emph{Flynn's Taxonomy};
\item \emph{Performance Theory (including Amdahl's and Gustafson's Laws)};
\end{itemize}
\item \textbf{Parallel Programming}:
\begin{itemize}
\vspace{-0.2cm}
\item \emph{The spectrum of high-demanding computational problems};
\item \emph{Regular and irregular problems};
\item \emph{Strategies for problem decomposition and their mapping to\\programming patterns};
\item \emph{The transactional and Map-Reduce models};
\end{itemize}
\newpage
\item \textbf{Concurrency Control and Synchronization}:
\begin{itemize}
\vspace{-0.2cm}
\item \emph{Competition and Collaboration};
\item \emph{Atomicity};
\item \emph{Linearization};
\item \emph{Monitors};
\item \emph{Locks};
\item \emph{Semaphores};
\item \emph{Barriers};
\item \emph{Producer-Consumer};
\item \emph{Multi-Reader Single-Writer Locks};
\item \emph{Futures};
\item \emph{Concurrency in Practice in Java and C};
\end{itemize}
\item \textbf{Safety and Liveness}:
\begin{itemize}
\vspace{-0.2cm}
\item \emph{Safety vs. Liveness};
\item \emph{Progress};
\item \emph{Deadlock};
\item \emph{Deadlock Prevention, Avoidance, Detection, and Recovery};
\item \emph{Livelock};
\item \emph{Livelock Avoidance};
\item \emph{Priority Inversion};
\item \emph{Priority Inheritance};
\item \emph{Lock-Free Algorithms};
\end{itemize}
\item \textbf{The Transactional Model}:
\begin{itemize}
\vspace{-0.2cm}
\item \emph{Composite Operations};
\item \emph{Transactions (Serializability)};
\item \emph{Optimistic Concurrency Control (OCC)};
\item \emph{Transactional Memory};
\end{itemize}
\newpage
\item \textbf{Concurrency without Shared Data}:
\begin{itemize}
\vspace{-0.2cm}
\item \emph{Active Objects};
\item \emph{Message Passing};
\item \emph{Actors};
\end{itemize}
\end{enumerate}
\subsection*{\large{\textbf{1A.7. Evaluation}}}
\label{ssec:lecture-1A7}
\begin{itemize}
\vspace{-0.2cm}
\item 60\% - \textbf{2 \emph{Tests} (\emph{Individual} and \emph{Online})} [average $\geq$ 8.5 points];
\vspace{-0.1cm}
\item 40\% - \textbf{1 \emph{Project} (Groups of 3 Students)} [grade $\geq$ 8.5 points];
\vspace{-0.1cm}
\item 3\% - \textbf{Participation in \emph{Classes' Life Cycle}}:
\begin{itemize}
\vspace{-0.2cm}
\item \emph{Lectures};
\vspace{-0.1cm}
\item \emph{Labs};
\vspace{-0.1cm}
\item \emph{Piazza};
\end{itemize}
\end{itemize}
\clearpage
| {
"alphanum_fraction": 0.6722185935,
"avg_line_length": 43.3301886792,
"ext": "tex",
"hexsha": "d6470afcbd6ea19cdd5f6eb4933482ba45c3b344",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "34f868dabde4ee6b8e9791857567942fb4219b2a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "rubenandrebarreiro/fct-nova-concurrency-and-parallelism-labs",
"max_forks_repo_path": "lectures-and-labs-notes/tex/lectures/lecture-1/lecture-1A.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "34f868dabde4ee6b8e9791857567942fb4219b2a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "rubenandrebarreiro/fct-nova-concurrency-and-parallelism-labs",
"max_issues_repo_path": "lectures-and-labs-notes/tex/lectures/lecture-1/lecture-1A.tex",
"max_line_length": 283,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "34f868dabde4ee6b8e9791857567942fb4219b2a",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "rubenandrebarreiro/fct-nova-concurrency-and-parallelism-labs",
"max_stars_repo_path": "lectures-and-labs-notes/tex/lectures/lecture-1/lecture-1A.tex",
"max_stars_repo_stars_event_max_datetime": "2021-03-16T13:05:29.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-03-03T11:44:05.000Z",
"num_tokens": 2971,
"size": 9186
} |
\newpage
%HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
\section{Appendix: The grammar of \Mouse\ PEG}
%HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
\small
\begin{Verbatim}[frame=single,framesep=2mm,samepage=true,xleftmargin=15mm,xrightmargin=15mm,baselinestretch=0.8]
Grammar = Space (Rule/Skip)*+ EOT ;
Rule = Name EQUAL RuleRhs DiagName? SEMI ;
Skip = SEMI
/ _++ (SEMI/EOT) ;
RuleRhs = Sequence Actions (SLASH Sequence Actions)* ;
Choice = Sequence (SLASH Sequence)* ;
Sequence = Prefixed+ ;
Prefixed = PREFIX? Suffixed ;
Suffixed = Primary (UNTIL Primary / SUFFIX)? ;
Primary = Name
/ LPAREN Choice RPAREN
/ ANY
/ StringLit
/ Range
/ CharClass ;
Actions = OnSucc OnFail ;
OnSucc = (LWING AND? Name? RWING)? ;
OnFail = (TILDA LWING Name? RWING)? ;
Name = Letter (Letter / Digit)* Space ;
DiagName = "<" Char++ ">" Space ;
StringLit = ["] Char++ ["] Space ;
CharClass = ("[" / "^[") Char++ "]" Space ;
Range = "[" Char "-" Char "]" Space ;
Char = Escape
/ ^[\r\n\\] ;
Escape = "\\u" HexDigit HexDigit HexDigit HexDigit
/ "\\t"
/ "\\n"
/ "\\r"
/ !"\\u""\\"_ ;
Letter = [a-z] / [A-Z] ;
Digit = [0-9] ;
HexDigit = [0-9] / [a-f] / [A-F] ;
PREFIX = [&!] Space ;
SUFFIX = [?*+] Space ;
UNTIL = ("*+" / "++") Space ;
EQUAL = "=" Space ;
SEMI = ";" Space ;
SLASH = "/" Space ;
AND = "&" Space ;
LPAREN = "(" Space ;
RPAREN = ")" Space ;
LWING = "{" Space ;
RWING = "}" Space ;
TILDA = "~" Space ;
ANY = "_" Space ;
Space = ([ \r\n\t] / Comment)* ;
Comment = "//" _*+ EOL ;
EOL = [\r]? [\n] / !_ ;
EOT = !_ ;
\end{Verbatim}
\normalsize
| {
"alphanum_fraction": 0.5061099796,
"avg_line_length": 30.6875,
"ext": "tex",
"hexsha": "6c53568a13872695a37624e44cc38c90df00553b",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "021a81f0c02fc079a944569ba382f2c9d7b9b9eb",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "celer/mouse",
"max_forks_repo_path": "Mouse/source/manual/AppendixA.tex",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "021a81f0c02fc079a944569ba382f2c9d7b9b9eb",
"max_issues_repo_issues_event_max_datetime": "2016-05-31T11:00:12.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-04-07T06:22:47.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "celer/mouse",
"max_issues_repo_path": "Mouse/source/manual/AppendixA.tex",
"max_line_length": 113,
"max_stars_count": 7,
"max_stars_repo_head_hexsha": "021a81f0c02fc079a944569ba382f2c9d7b9b9eb",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "celer/mouse",
"max_stars_repo_path": "Mouse/source/manual/AppendixA.tex",
"max_stars_repo_stars_event_max_datetime": "2017-04-08T14:06:28.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-30T11:17:56.000Z",
"num_tokens": 663,
"size": 1964
} |
\chapter{Results}
In this chapter, the results are presented. First comes the hardware evaluation of the different HMDs, then the answers and insights from the expert interviews. The last part is dedicated to the prototypes that were created based on the results from the previous sections.
\subimport{subsections/}{hardware.tex}
\subimport{subsections/}{interviews.tex}
\subimport{subsections/}{prototypes.tex}
| {
"alphanum_fraction": 0.8136020151,
"avg_line_length": 66.1666666667,
"ext": "tex",
"hexsha": "e97b43f81767eaffb2c9b2a1aa005685ab2eea25",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "27e877c9dbe4bed2cab475005d45c3de05706dfb",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "davidbergvik/Master-Thesis-IxD",
"max_forks_repo_path": "report/sections/results/results.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "27e877c9dbe4bed2cab475005d45c3de05706dfb",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "davidbergvik/Master-Thesis-IxD",
"max_issues_repo_path": "report/sections/results/results.tex",
"max_line_length": 257,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "27e877c9dbe4bed2cab475005d45c3de05706dfb",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "davidbergvik/Master-Thesis-IxD",
"max_stars_repo_path": "report/sections/results/results.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 88,
"size": 397
} |
\section{String base object and event level tags}
\label{App:StringCuts}
\subsection{Variables for run cuts}
{\ttfamily \noindent
\begin{verbatim}
Int_t fAliceRunId; //the run id
Float_t fAliceMagneticField; //value of the magnetic field
Int_t fAliceRunStartTimeMin; //minimum run start date
Int_t fAliceRunStartTimeMax; //maximum run start date
Int_t     fAliceRunStopTimeMin;        //minimum run stop date
Int_t fAliceRunStopTimeMax; //maximum run stop date
TString fAlirootVersion; //aliroot version
TString fRootVersion; //root version
TString fGeant3Version; //geant3 version
Bool_t fAliceRunQuality; //validation script
Float_t fAliceBeamEnergy; //beam energy cm
TString fAliceBeamType; //run type (pp, AA, pA)
Int_t fAliceCalibrationVersion; //calibration version
Int_t fAliceDataType; //0: simulation -- 1: data
\end{verbatim}
}
\subsection{Variables for event cuts}
To invoke one of these cuts, please make sure to use the {\ttfamily fEventTag.} identifier. Example: {\ttfamily "fEventTag.fNParticipants < 100"}.
\par
{\ttfamily \noindent
\begin{verbatim}
Int_t fNParticipantsMin, fNParticipantsMax;
Float_t fImpactParamMin, fImpactParamMax;
Float_t fVxMin, fVxMax;
Float_t fVyMin, fVyMax;
Float_t fVzMin, fVzMax;
Int_t fPrimaryVertexFlag;
Float_t fPrimaryVertexZErrorMin, fPrimaryVertexZErrorMax;
ULong64_t fTriggerMask;
UChar_t fTriggerCluster;
Float_t fZDCNeutron1EnergyMin, fZDCNeutron1EnergyMax;
Float_t fZDCProton1EnergyMin, fZDCProton1EnergyMax;
Float_t fZDCNeutron2EnergyMin, fZDCNeutron2EnergyMax;
Float_t fZDCProton2EnergyMin, fZDCProton2EnergyMax;
Float_t fZDCEMEnergyMin, fZDCEMEnergyMax;
Float_t fT0VertexZMin, fT0VertexZMax;
Int_t fMultMin, fMultMax;
Int_t fPosMultMin, fPosMultMax;
Int_t fNegMultMin, fNegMultMax;
Int_t fNeutrMultMin, fNeutrMultMax;
Int_t fNV0sMin, fNV0sMax;
Int_t fNCascadesMin, fNCascadesMax;
Int_t fNKinksMin, fNKinksMax;
Int_t fNPMDTracksMin, fNPMDTracksMax;
Int_t fNFMDTracksMin, fNFMDTracksMax;
Int_t fNPHOSClustersMin, fNPHOSClustersMax;
Int_t fNEMCALClustersMin, fNEMCALClustersMax;
Int_t fNJetCandidatesMin, fNJetCandidatesMax;
Float_t fTopJetEnergyMin;
Float_t fTopNeutralEnergyMin;
Int_t fNHardPhotonCandidatesMin, fNHardPhotonCandidatesMax;
Int_t fNChargedAbove1GeVMin, fNChargedAbove1GeVMax;
Int_t fNChargedAbove3GeVMin, fNChargedAbove3GeVMax;
Int_t fNChargedAbove10GeVMin, fNChargedAbove10GeVMax;
Int_t fNMuonsAbove1GeVMin, fNMuonsAbove1GeVMax;
Int_t fNMuonsAbove3GeVMin, fNMuonsAbove3GeVMax;
Int_t fNMuonsAbove10GeVMin, fNMuonsAbove10GeVMax;
Int_t fNElectronsAbove1GeVMin, fNElectronsAbove1GeVMax;
Int_t fNElectronsAbove3GeVMin, fNElectronsAbove3GeVMax;
Int_t fNElectronsAbove10GeVMin,fNElectronsAbove10GeVMax;
Int_t fNElectronsMin, fNElectronsMax;
Int_t fNMuonsMin, fNMuonsMax;
Int_t fNPionsMin, fNPionsMax;
Int_t fNKaonsMin, fNKaonsMax;
Int_t fNProtonsMin, fNProtonsMax;
Int_t fNLambdasMin, fNLambdasMax;
Int_t fNPhotonsMin, fNPhotonsMax;
Int_t fNPi0sMin, fNPi0sMax;
Int_t fNNeutronsMin, fNNeutronsMax;
Int_t fNKaon0sMin, fNKaon0sMax;
Float_t fTotalPMin, fTotalPMax;
Float_t fMeanPtMin, fMeanPtMax;
Float_t fTopPtMin;
Float_t fTotalNeutralPMin, fTotalNeutralPMax;
Float_t fMeanNeutralPtMin, fMeanNeutralPtMax;
Float_t fTopNeutralPtMin;
Float_t fEventPlaneAngleMin, fEventPlaneAngleMax;
Float_t fHBTRadiiMin, fHBTRadiiMax;
\end{verbatim}
} | {
"alphanum_fraction": 0.7731673582,
"avg_line_length": 38.8709677419,
"ext": "tex",
"hexsha": "66ed267cebcbba2f99bec6e2870ac29ae9e9dafe",
"lang": "TeX",
"max_forks_count": 275,
"max_forks_repo_forks_event_max_datetime": "2022-03-31T13:06:19.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-06-21T20:24:05.000Z",
"max_forks_repo_head_hexsha": "c53712645bf1c7d5f565b0d3228e3a6b9b09011a",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "AllaMaevskaya/AliRoot",
"max_forks_repo_path": "doc/Distributed-Analysis/StringCuts.tex",
"max_issues_count": 1388,
"max_issues_repo_head_hexsha": "c53712645bf1c7d5f565b0d3228e3a6b9b09011a",
"max_issues_repo_issues_event_max_datetime": "2022-03-30T15:26:09.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-11-01T10:27:36.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "AllaMaevskaya/AliRoot",
"max_issues_repo_path": "doc/Distributed-Analysis/StringCuts.tex",
"max_line_length": 146,
"max_stars_count": 52,
"max_stars_repo_head_hexsha": "c53712645bf1c7d5f565b0d3228e3a6b9b09011a",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "AllaMaevskaya/AliRoot",
"max_stars_repo_path": "doc/Distributed-Analysis/StringCuts.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-11T11:49:35.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-12-11T13:04:01.000Z",
"num_tokens": 1225,
"size": 3615
} |
\subsection{Exponential separation of $\braket{X}$-Isomorphism-QMDD}
In this section, we separate $\braket{X}$-Isomorphism-QMDD from QMDD by giving a quantum state which requires a QMDD with $2^{\Omega(\sqrt{n})}$ nodes, but has an $\braket{X}$-Isomorphism-QMDD with only $\mathcal O(n)$ nodes.
By $\braket{X}$-Isomorphism-QMDD, we mean that the only isomorphisms that are allowed to appear on the diagram's Isomorphism nodes are of the form $A_1\otimes\cdots\otimes A_n$ where $A_i$ is either $I=\begin{smallmat}1 & 0 \\ 0 & 1\end{smallmat}$ or $X=\begin{smallmat}0 & 1 \\ 1 & 0\end{smallmat}$.
\v{D}uri\v{s} et al. show the following lower bound on the size of nondeterministic branching programs.
\begin{theorem}[\v{D}uri\v{s} et al.\cite{vdurivs2004multi}]
\label{thm:random-vector-space-hard-for-bdd}
The characteristic function $f_V$ of a randomly chosen vector space $V$ in $\mathbb F_2^n$ needs a (non-) deterministic branching program of size $2^{\Omega(n)}/(2n)$ with high probability.
\end{theorem}
For us, it suffices to use the fact that the bound holds for deterministic branching programs, because it implies the following.
% this means that the uniform superposition over $A_n$ is a quantum state which has a large QMDD.
\begin{theorem}
For a random vector space $S\subseteq \{0,1\}^n$, the uniform superposition $\ket{S}$ has QMDDs of size $2^{\Omega(n)}/(2n)$, with
\begin{align}
\ket{S}= \frac{1}{\sqrt{|S|}} \sum_{x\in S}\ket{x}
\end{align}
\end{theorem}
\begin{proof}[Proof sketch]
The idea is that BDDs are QMDDs taking values in $\{0,1\}$.
Conversely, whenever the amplitudes of a state $\ket{\phi}$ have values only in $\{0,z\}$ for some $z\in \mathbb C$, then, up to a phase, we have $\ket{\phi}=\ket{S}$, for some set of bitstrings $S\subseteq\{0,1\}^n$.
In this case, the QMDD has the same structure as the BDD of the indicator function $f_S$, namely, the weights on its edges are all in $\{0,1\}$, and they have the same number of nodes.
By taking $S$ to be a random vector space, the result follows from \autoref{thm:random-vector-space-hard-for-bdd} because all BDDs are branching programs.
\end{proof}
On the other hand, these states are compactly represented by $\braket{X}$-Isomorphism QMDDs, because they are stabilizer states.
For context, we note that the theorem proved by \v{D}uri\v{s} et al. is much stronger than what we need.
Namely, they show that, even if the qubits do not need to be ordered, and even if the diagram is allowed to flip nondeterministic coins, it still holds that almost all vector spaces have exponential-size diagrams.
\paragraph{Vector spaces and stabilizer states}
A \emph{vector space} of $\{0,1\}^n$ is a set $S\subseteq\{0,1\}^n$ which contains the all-zero bitstring $0$ and is closed under bitwise XOR, i.e., for each $a,b\in S$, it holds that $(a\oplus b)\in S$.
Each vector space has a basis, and has exactly $2^k$ elements for some $0\leq k\leq n$.
The uniform superposition over $S$ is the $n$-qubit state $\ket{S}_n$,
\begin{align}
\ket{S}_n =\frac{1}{\sqrt{|S|}} \sum_{x\in S}\ket{x}
\end{align}
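As a concrete illustration (this small example is ours and not part of the cited construction), take $n=2$ and the vector space $S=\{00,11\}$. The corresponding uniform superposition is
\begin{align}
\ket{S}_2 = \frac{1}{\sqrt{2}}\left(\ket{00}+\ket{11}\right),
\end{align}
i.e., a Bell state, which is indeed a stabilizer state (it is stabilized by $X\otimes X$ and $Z\otimes Z$), in line with the theorem below.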
\begin{theorem}
If $S$ is a vector space, then $\ket{S}$ is a stabilizer state.
\end{theorem}
\begin{proof}[Proof sketch]
For $n=1$, the statement holds trivially.
For $n>1$, one possible argument is the following.
A vector space $S\subseteq\{0,1\}^n$ is the solution set of a system of linear equations over $\mathbb F_2$.
Consider the first variable $x_1$ and the two restrictions $x_1:=0$ and $x_1:=1$; each again yields a system of linear equations over the remaining variables, with solution sets $S_0$ and $S_1$ in $\{0,1\}^{n-1}$.
Since $0\in S$, the set $S_0$ is non-empty, and it is itself a vector space.
If $S_1$ is also non-empty, it is ``the same'' set up to translation: $S_1=S_0\oplus t$ for a suitably chosen bitstring $t$, i.e., it is obtained by XORing every element of $S_0$ with $t$.
Hence $\ket{S}$ equals either $\ket{0}\otimes\ket{S_0}$ or $\frac{1}{\sqrt 2}\left(\ket{0}\otimes\ket{S_0}+\ket{1}\otimes\ket{S_0\oplus t}\right)$; the latter is obtained from $\ket{+}\otimes\ket{S_0}$ by applying, controlled on the first qubit, an $X$ gate to every qubit where $t$ has a $1$, which is a Clifford circuit.
By induction, $\ket{S_0}$ is a stabilizer state, and therefore so is $\ket{S}$.
\end{proof} | {
"alphanum_fraction": 0.7286175363,
"avg_line_length": 80.8260869565,
"ext": "tex",
"hexsha": "2209b08185b9285cf068b8ae11186d2297206af3",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f00a9547b2034f4592e732a382cdbd34e11e13db",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "Katafotic/latex_parsing",
"max_forks_repo_path": "Src/CS/sections/X_iso_qmdd_lower_bound_2.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f00a9547b2034f4592e732a382cdbd34e11e13db",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "Katafotic/latex_parsing",
"max_issues_repo_path": "Src/CS/sections/X_iso_qmdd_lower_bound_2.tex",
"max_line_length": 465,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f00a9547b2034f4592e732a382cdbd34e11e13db",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "Katafotic/latex_parsing",
"max_stars_repo_path": "Src/CS/sections/X_iso_qmdd_lower_bound_2.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1186,
"size": 3718
} |
%
% File acl2018.tex
%
%% Based on the style files for ACL-2017, with some changes, which were, in turn,
%% Based on the style files for ACL-2015, with some improvements
%% taken from the NAACL-2016 style
%% Based on the style files for ACL-2014, which were, in turn,
%% based on ACL-2013, ACL-2012, ACL-2011, ACL-2010, ACL-IJCNLP-2009,
%% EACL-2009, IJCNLP-2008...
%% Based on the style files for EACL 2006 by
%%[email protected] or [email protected]
%% and that of ACL 08 by Joakim Nivre and Noah Smith
\documentclass[11pt,a4paper]{article}
\usepackage[hyperref]{acl2018}
\usepackage{times}
\usepackage{latexsym}
\usepackage{algorithm}
\usepackage{algorithmic}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{tikz}
\usetikzlibrary{shapes,arrows}
%\usepackage{bibtex}
\usepackage{url}
\aclfinalcopy % Uncomment this line for the final submission
%\def\aclpaperid{***} % Enter the acl Paper ID here
%\setlength\titlebox{5cm}
% You can expand the titlebox if you need extra space
% to show all the authors. Please do not make the titlebox
% smaller than 5cm (the original size); we will check this
% in the camera-ready version and ask you to change it back.
\title{Trying Tries Algorithm}
\author{Denizhan Pak \\ Indiana University - Computational
Linguistics Department\\ {\tt [email protected]}}
\date{October 10, 2019}
\begin{document}
\maketitle
\begin{abstract}
There are many reasons to apply morphological analysis at the
sentence level. For the application of computational tools to
corpus data it is important that all tokens are specified, so that
the tools receive data that is as informative as possible.
Determining the list of morphemes in a language, however, is a
time-consuming task, and an unsupervised algorithm could relieve
quite a few researchers and grad students. In this paper we propose
a potentially useful unsupervised machine learning algorithm to
accomplish just this task.
\end{abstract}
\section{Introduction}
Morpheme parsing is an important task from a computational linguistics
standpoint, as it provides a way to denote meaningful units within a corpus;
in turn, this allows us to apply the many tools which use such units at the
lowest level of components that still carry information. We propose that
smaller semantic units can be differentiated by their relative frequency. More
explicitly, if we are able to identify substrings which occur as a cohesive
unit with a high relative frequency, then those units correspond to morphemes
or some other sort of semantic unit. The algorithm below provides an
unsupervised method through which such semantic units can be distinguished.
The algorithm uses a data structure similar to a ``trie''; however, edges
between nodes are assigned a transition probability, so we shall call this a
``p-trie.'' The algorithm requires three user-determined hyperparameters,
$0 < \lambda < 1 < \rho$ and a function $f:\mathbb{Z}\to[0,1]$,
where $\rho$ is a reinforcement rate, $\lambda$ is a learning rate, and $f$ is
a probability function. (For the purpose of this project I will be using a
translated sigmoidal function.)
\begin{algorithm}
\label{alg:algorithm1}
\caption{Build Trie}
\begin{algorithmic}
\STATE $context \leftarrow root$
\FOR{$char$ in $corpus$}
\IF{$context == root$}
\STATE $p_1 = 1,p_2=1$
\ELSE
\STATE $p_1 = \lambda, p_2 = \rho$
\ENDIF
\IF{$char$ in $context\to children$}
\STATE $weight_{char} \leftarrow weight_{char} * p_2$
\ELSE
\STATE $weight_{char} \leftarrow p_1$
\STATE $context\to children\ \textbf{append}\ char$
\ENDIF
\STATE $r \leftarrow \textbf{random number(0,1)}$
\IF{$r > f(weight_{char})$}
\STATE $context \leftarrow root$
\ELSE
\STATE $context \leftarrow char$
\ENDIF
\ENDFOR
\STATE $\textbf{return}\ root$
\end{algorithmic}
\end{algorithm}
At the end of the algorithm the function will have returned a $p$-trie with
the estimated probability values. Once we have a $p$-trie, it is possible to
extract potential morphemes as sequences of nodes that start at the root. We
can even assign a certainty per morpheme, which is the product of the weights
along its edges. This certainty can then be used to rate the likelihood of
possible segmentations. If the corpus is not large enough, an easy approach to
improving access to data would simply be to randomize the words in the corpus
and run the algorithm through it again, starting with the existing $p$-trie.
This technique is similar to a random walk along a Markov chain. Since it is a
Bayesian method, we can incorporate priors by passing a previously calculated
$p$-trie \cite{Bayes}.
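To make the procedure concrete, the following is a minimal Python sketch of
\textbf{Algorithm} \ref{alg:algorithm1}; the class and function names, the
hyperparameter values, and the particular translated sigmoid $f$ are our
illustrative choices, not prescribed by the algorithm.
\begin{verbatim}
import random
from math import exp

LAMBDA = 0.3   # learning rate, 0 < lambda < 1 (illustrative value)
RHO = 1.1      # reinforcement rate, rho > 1 (illustrative value)

def f(weight, shift=1.0):
    # translated sigmoid: maps an edge weight to a continuation probability
    return 1.0 / (1.0 + exp(-(weight - shift)))

class Node:
    def __init__(self):
        self.children = {}   # char -> child Node
        self.weights = {}    # char -> weight of the edge to that child

def build_trie(corpus):
    root = Node()
    context = root
    for char in corpus:
        p1, p2 = (1.0, 1.0) if context is root else (LAMBDA, RHO)
        if char in context.children:
            context.weights[char] *= p2       # reinforce a seen transition
        else:
            context.children[char] = Node()   # new child, initial weight p1
            context.weights[char] = p1
        # follow the edge with probability f(weight); otherwise reset to root
        if random.random() > f(context.weights[char]):
            context = root
        else:
            context = context.children[char]
    return root
\end{verbatim}
Candidate morphemes can then be read off as root-to-node paths and rated by
the weights along their edges, as described above.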
\section{Proposed Goals}
\subsection{Minimum Viable Product}
The minimum viable product of this project is a complete implementation of the
algorithm defined in \textbf{Algorithm} \ref{alg:algorithm1} along with a
comprehensive performance analysis. The
analysis will include accuracy testing across multiple corpora in multiple
languages along with the effects of different parameterizations.
\subsection{Expected Product}
The expected product will be the minimum viable product as well as specific
optimizations such as simulated annealing for the parameter values, a more
complex function for reinforcement, and an automated randomization of the
corpus designed to improve performance.
\subsection{High-Achievement Product}
The high-achievement product will be the expected product as well as a generalization of the algorithm which could be applied to finding meaningful
sub-sequences in any sequential data, with a particular eye toward word
embeddings, as well as a potential generative application of the generated
$p$-trie.
\section{Requirements}
\begin{itemize}
\item A working python implementation of the algorithm.
\item A well documented description of the algorithm and its
performance.
\item A report characterizing the algorithm including its strengths
and weaknesses.
\end{itemize}
\section{Timelines}
\begin{itemize}
\item 10.15-10.22 A first implementation of the algorithm and basic
testing using a single corpus.
\item 10.22-10.29 Testing algorithm using varying parameter sets
across multiple languages and corpora.
\item 10.29-11.06 Implementation of simulated annealing and varied
reinforcement and corpora randomization.
\item 11.06-11.16 Evaluation of new implementation across corpora
and languages.
\item 11.16-11.26 Generalization of the algorithm to any subsequence
and evaluation of performance on different data such as word
embeddings.
\item 11.26-12.03 Writing final report and preparing presentation.
\end{itemize}
This does not include literature review or the writing of documentation both
of which I intend to do as they coincide with different parts of the timeline
above.
\section{Data Policy}
All data and code will be made available on a public git page. The code will
be written in Python. The data for testing and evaluation will be taken from
the UD treebanks \cite{UDDocumentation} dataset, for the following languages:
\begin{itemize}
\item English
\item Turkish
\item Chinese
\item Japanese (if time suffices)
\item Toki Pona (if time suffices)
\end{itemize}
Finally, the effectiveness of the algorithm will be compared to the Morphessor \cite{Morphessor}
Python implementation.
\newpage
\section{References}
\begin{thebibliography}{9}
\bibitem{Bayes}
Knight, Kevin
\textit{Bayesian Inference with Tears:
a tutorial workbook for natural language researchers}
\bibitem{Morphessor}
Creutz, Mathias and Lagus, Krista,
\textit{Unsupervised models for morpheme segmentation and morphology
learning}
\bibitem{UDDocumentation}
de Marneffe, Marie-Catherine and
Dozat, Timothy and
Silveira, Natalia and
Haverinen, Katri and
Ginter, Filip and
Nivre, Joakim and
Manning, Christopher D.,
\textit{Universal Stanford dependencies: A cross-linguistic typology}
\bibitem{ByteEnc}
Gage, Philip
\textit{A New Algorithm for Data Compression}
\end{thebibliography}
\section{Appendix}
In Figure~\ref{Flow} the flow diagram is presented
to explain the functioning of the algorithm.
% Define block styles
\tikzstyle{decision} = [diamond, draw, fill=blue!20,
text width=4.5em, text badly centered, node distance=3cm, inner sep=0pt]
\tikzstyle{block} = [rectangle, draw, fill=blue!20,
text width=5em, text centered, rounded corners, minimum height=4em]
\tikzstyle{line} = [draw, -latex']
\tikzstyle{cloud} = [draw, ellipse,fill=red!20, node distance=3cm,
minimum height=2em]
\begin{figure}
\begin{tikzpicture}[node distance = 2cm, auto]
% Place nodes
\node [block] (init) {initialize trie};
\node [cloud, right of=init] (system) {corpus};
\node [block, below of=init] (root) {set context to root};
\node [block, below of=root] (terbed) {get input value $\to char$};
\node [decision, below of=terbed] (pointer) {is pointer set to root?};
\node [block, left of=pointer, node distance=3cm] (update)
{set $p_1 = p_2 = 1$};
\node [block, right of=pointer, node distance=3.3cm] (child)
{set $p_1 = \lambda$ and $p_2 = \rho$};
\node [decision, below of=pointer, node distance=3.3cm] (expected) {is
$char$ a child node of pointer?};
\node [block,below of=expected, right of=expected, node distance=2.33cm]
(notchild) {add $char$ as child with transition probability $p_1$};
\node [block, below of=expected, left of=expected, node distance=2.33cm]
(ischild) {multiply transition probability of char by $p_2$};
\node [block, below of=expected, node distance=6cm] (transition)
{set context (probabilistically) $p_3 = f(\text{transition probability of $char$})$};
\node [block, below of=transition, node distance=3.3cm] (input)
{set context to $char$};
% Draw edges
\path [line] (init) -- (root);
\path [line] (root) -- (terbed);
\path [line] (terbed) -- (pointer);
\path [line] (pointer) -| node [near start] {yes} (update);
\path [line] (pointer) -- node [near start] {no} (child);
\path [line] (update) |- (expected);
\path [line] (child) |- (expected);
\path [line] (expected) -- node {yes} (ischild);
\path [line] (expected) -- node {no} (notchild);
\path [line,dashed] (system) |- (terbed);
\path [line] (ischild) -- (transition);
\path [line] (notchild) -- (transition);
\path [line, dashed] (transition) -- node {$p_3$} (input);
\path [line, dashed] (transition) -|++ (-5,13) node [near start]{$1-p_3$}|- (root);
\path [line] (input) -|++ (-5,13) |- (terbed);
\end{tikzpicture}
\caption{Flow Diagram}
\label{Flow}
\end{figure}
% include your own bib file like this:
%\bibliographystyle{acl}
%\bibliography{acl2018}
\end{document}
| {
"alphanum_fraction": 0.7173619245,
"avg_line_length": 43.186770428,
"ext": "tex",
"hexsha": "b82a1b4a7e19ca2ef38a01f8469b7a818e5102da",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "55da8003e98052b39c5fa112e8c68272811a55b9",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "denizhanpak/Trying-Tries",
"max_forks_repo_path": "Writeup/Trying-Trees.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "55da8003e98052b39c5fa112e8c68272811a55b9",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "denizhanpak/Trying-Tries",
"max_issues_repo_path": "Writeup/Trying-Trees.tex",
"max_line_length": 148,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "55da8003e98052b39c5fa112e8c68272811a55b9",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "denizhanpak/Trying-Tries",
"max_stars_repo_path": "Writeup/Trying-Trees.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2957,
"size": 11099
} |
Almost all interpretability methods work on image classification tasks. To learn how these methods are applied, how they work and what output they generate, the first step in this work is applying these methods on a classification task. We chose a dataset from the medical imaging field: the NIH (National Institutes of Health, United States) chest X-ray dataset \cite{wang2017chestx}.
We downloaded the dataset, trained a neural network on it, and applied the selected methods RISE, LIME and Grad-CAM described above.
\section{NIH Chest X-ray dataset}
The NIH Chest X-ray dataset contains 112,120 X-ray scans from 30,805 unique patients \cite{nihchestxraykaggle}. Every scan has one or more disease labels. Figure \ref{chest_xray_sample} shows three sample images from the dataset.
\begin{figure}[h]
\centering
\includegraphics[width=14cm]{chapters/03_classification/images/chest-x-ray.png}
\caption{Examples for the NIH Chest X-ray dataset.}
\label{chest_xray_sample}
\end{figure}
\section{Model Training}
We decided to train a model based on an existing neural network architecture, because building a state of the art architecture is very hard and not the focus of this thesis.
The detailed results of the training sessions are available in the results directory of the GitHub repository: \href{https://github.com/andef4/thesis-code/tree/master/nhs-chest-xray/results/}{nhs-chest-xray/results}.
\subsection{Inception ResNet v2}
\nblink{nhs-chest-xray/inception\_resnetv2.ipynb}
We chose Inception ResNet v2 \cite{szegedy2017inception} as the first architecture to investigate. This is a modern neural network architecture built for image analysis.
The ResNet variant of the Inception architecture delivers similar performance to the normal Inception model, but with significantly reduced training time.
\nblink{nhs-chest-xray/preprocess.ipynb}
The preprocessing steps required to use this architecture are resizing the images to 299x299 pixels and converting the gray-scale images to color images.
We also tried to modify the network to directly use gray-scale images, but this was not successful because the network architecture is built for three-channel images.
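A minimal sketch of this preprocessing with torchvision transforms is shown below; it is our illustration of the described steps, not the exact code from the notebook, and the particular transform composition is an assumption.
\begin{verbatim}
from torchvision import transforms

preprocess = transforms.Compose([
    transforms.Resize((299, 299)),                # Inception ResNet v2 input
    transforms.Grayscale(num_output_channels=3),  # replicate gray channel 3x
    transforms.ToTensor(),
])
\end{verbatim}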
We initially trained the network on a smaller sample set (5,607 images) of the data. The maximum validation accuracy reached was 40\%. The training times for this small training subset
were already very long, so we decided to abandon the Inception architecture for now and use a ResNet-based architecture instead.
\subsection{ResNet}
\nblink{nhs-chest-xray/resnet.ipynb}
The first test of ResNet50 \cite{he2016deep} with pretrained parameters (trained on ImageNet) on the sample dataset showed fast training times but low accuracy. Training the network from scratch showed promising validation accuracy, which started to decrease in later epochs. As the training accuracy was still increasing, this was a clear indicator that the neural network was overfitting. We decided to change the architecture to ResNet18, which is a smaller ResNet variant with fewer parameters and should therefore be less susceptible to overfitting.
The first training run of ResNet18 with the sample dataset looked promising, so we moved to train the network on the full dataset. The results were underwhelming, with a validation accuracy maxing out at 23\%.
\subsection{Single label only}
The NIH Chest X-ray dataset is a multi-label dataset. This means a single image can contain labels for multiple diseases. Training models for such datasets is much harder than training on datasets where each image has only one label. We therefore decided to remove all images which contain multiple labels and to train only on images with a single label.
We did multiple iterations on the full dataset with ResNet18, experimenting with different parameters (a configuration sketch is shown below):
\begin{itemize}
\item Using SGD optimizer instead of Adam
\item Using a pretrained model vs. from-scratch-model
\item Using data augmentation on the input (color jitter, random horizontal flip)
\end{itemize}
None of these parameters changed the validation accuracy significantly; it always peaked at around 60\% to 65\%.
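The sketch below illustrates one such configuration; it is our approximation under assumptions (the class count, learning rates and augmentation strengths are illustrative values, not the exact settings of the reported runs).
\begin{verbatim}
import torch
from torchvision import models, transforms

num_classes = 15  # 14 disease labels + "No findings" (assumed count)

model = models.resnet18(pretrained=True)  # or pretrained=False (from scratch)
model.fc = torch.nn.Linear(model.fc.in_features, num_classes)

optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
# alternative: torch.optim.Adam(model.parameters(), lr=1e-4)

augmentation = transforms.Compose([
    transforms.ColorJitter(brightness=0.1, contrast=0.1),
    transforms.RandomHorizontalFlip(),
    transforms.Resize((224, 224)),                # ResNet input size
    transforms.Grayscale(num_output_channels=3),
    transforms.ToTensor(),
])
\end{verbatim}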
\subsection{DenseNet}
\nblink{nhs-chest-xray/densenet.ipynb}
Researching what other people did to train a model for this dataset, we came across the CheXNet \cite{rajpurkar2017chexnet} paper and an implementation of the paper in PyTorch \cite{chexnetpytorch}.
The paper uses the DenseNet \cite{huang2017densely} architecture. An implementation of DenseNet is available in PyTorch. The implementation of the paper provided a pretrained model, but loading the saved model did not work, because the PyTorch version used to save it was old and incompatible with ours.
\subsection{Retraining DenseNet}
\nblink{nhs-chest-xray/densenet\_singleclass.ipynb}
After fixing multiple implementation errors (e.g., an incorrect output class count, and an assignment of classes to output neurons that differed between the test and validation sets), we started to get acceptable accuracy rates for the DenseNet implementation, peaking at around 55\%.
\subsection{Removing "No findings" class}
The implementation of the CheXNet paper displayed an accuracy table per class. We decided to do the same for our implementation. We quickly discovered that only the "No findings" class had correctly classified images, all other classes had zero correct classifications. The "No findings" class is by far the largest class in the dataset. For the neural network to get a good result, the easiest way is to just declare all images to be in the class "No findings".
We removed the "No findings" class from the dataset and retrained both the ResNet18 and the DenseNet implementation, getting similar results of around 30\% accuracy.
\subsection{Weighted classes}
The dataset still has a class imbalance, even with the ``No findings'' class removed. To counteract this problem, we calculated each class's share of the full dataset and passed this information as class weights to the loss criterion used for backpropagation.
This increased the accuracy only to around 33\%, but it should help classes which are underrepresented in the dataset.
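The sketch below shows one common way to derive such weights in PyTorch; it uses inverse class frequency, which is close to but not necessarily identical to the percentage-based weights described above, and \texttt{train\_labels} (one class index per training image) is an assumed input.
\begin{verbatim}
from collections import Counter
import torch

def class_weights(train_labels, num_classes):
    # inverse-frequency weights: rare classes get larger weights
    counts = Counter(train_labels)
    total = len(train_labels)
    return torch.tensor(
        [total / (num_classes * counts.get(c, 1)) for c in range(num_classes)],
        dtype=torch.float,
    )

# usage sketch, with 14 disease classes after removing "No findings":
# criterion = torch.nn.CrossEntropyLoss(weight=class_weights(train_labels, 14))
\end{verbatim}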
\subsection{Conclusion}
At this point we decided to stop trying to enhance the neural network, because the training already took a long time and a highly accurate network is not the goal of this thesis. Some classes showed high accuracy ratings; analyzing these classes should provide enough insight into how the interpretability methods work.
| {
"alphanum_fraction": 0.8115070184,
"avg_line_length": 85.3026315789,
"ext": "tex",
"hexsha": "cc4c81fe858210d4432abe07c3723fe82564e35c",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a94ecd7cff9f00ecd23ecee319076b78bef79a8e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "andef4/thesis-doc",
"max_forks_repo_path": "chapters/03_classification/01_training.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a94ecd7cff9f00ecd23ecee319076b78bef79a8e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "andef4/thesis-doc",
"max_issues_repo_path": "chapters/03_classification/01_training.tex",
"max_line_length": 553,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a94ecd7cff9f00ecd23ecee319076b78bef79a8e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "andef4/thesis-doc",
"max_stars_repo_path": "chapters/03_classification/01_training.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1389,
"size": 6483
} |
\chapter{Oerba 200AF}
%This should all be done with actual figures
Anomaly 1 - Stage 1
\includegraphics{Images/anomaly1stage1}
\pickup{2 Ghysal Greens}{on the right if possible without getting an encounter}
Anomaly 2 - Stage 1
\includegraphics{Images/anomaly2stage1}
Anomaly 2 - Stage 2
\includegraphics{Images/anomaly2stage2}
\pickup{500 Gil}{near the tree}
Anomaly 3 - Stage 1
\includegraphics{Images/anomaly3stage1}
Anomaly 3 - Stage 2
\includegraphics{Images/anomaly3stage2}
Anomaly 3 - Stage 3
\includegraphics{Images/anomaly3stage3}
\begin{menu}
\begin{itemize}
\crystarium
\begin{itemize}
\item Noel:
\begin{itemize}
\item All \rav
\item \stagebonus{\rav}
\end{itemize}
\item Serah:
\begin{itemize}
\item All \rav
\item \stagebonus{ATB Level}
\end{itemize}
\end{itemize}
\end{itemize}
\end{menu}
\pickup{Librascope}{at the stairs}
\pickup{600 Gil}{in the house}
\begin{battle}{Oerba Caius}
\begin{itemize}
\item \sixth
\begin{itemize}
\item Shift
\end{itemize}
\item \second
\begin{itemize}
\item Auto-Chain
\item Gahongas Feral Link, \circlec, \squarec, \circlec
\end{itemize}
\item \third
\begin{itemize}
\item Auto-Chain
\end{itemize}
\item \fifth
\begin{itemize}
\item Blizzara-Aerora
\item Repeat, \com-buffer into
\end{itemize}
\item \first
\begin{itemize}
\item Ruins x dead
\end{itemize}
\end{itemize}
\end{battle}
\pickup{Artifact of Origins}{in front of you}. Force an encounter in the corner wall. Get on the Chocobo. Get the Ghysal Greens and Librascope if you haven't already. \pickup{hidden Graviton Core}{on the schoolhouse roof} Head to the gate.
\newline | {
"alphanum_fraction": 0.6549104721,
"avg_line_length": 22.2048192771,
"ext": "tex",
"hexsha": "378c9979d0ced3d24147edced2d58a842be36393",
"lang": "TeX",
"max_forks_count": 15,
"max_forks_repo_forks_event_max_datetime": "2021-10-03T12:58:27.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-11-06T10:30:25.000Z",
"max_forks_repo_head_hexsha": "8045824bbe960721865ddb9c216fe4e2377a2aae",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "HannibalSnekter/Final-Fantasy-Speedruns",
"max_forks_repo_path": "Final Fantasy XIII-2/Chapters/oerba200af.tex",
"max_issues_count": 3,
"max_issues_repo_head_hexsha": "8045824bbe960721865ddb9c216fe4e2377a2aae",
"max_issues_repo_issues_event_max_datetime": "2020-11-18T11:44:28.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-11-05T08:11:06.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "HannibalSnekter/Final-Fantasy-Speedruns",
"max_issues_repo_path": "Final Fantasy XIII-2/Chapters/oerba200af.tex",
"max_line_length": 239,
"max_stars_count": 10,
"max_stars_repo_head_hexsha": "8045824bbe960721865ddb9c216fe4e2377a2aae",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "HannibalSnekter/Final-Fantasy-Speedruns",
"max_stars_repo_path": "Final Fantasy XIII-2/Chapters/oerba200af.tex",
"max_stars_repo_stars_event_max_datetime": "2022-02-18T09:01:43.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-02-27T04:50:16.000Z",
"num_tokens": 590,
"size": 1843
} |
\chapter{\toolname-based Competitions} \label{chap:competitions}
In this chapter, we describe how to participate in \toolname-based competitions.
For more details, check out the competition specification document~\cite{CompetitionSpecification}.
\toolname{} defines several {\it official competitions} (\sref{sec:competitions:overview}), which are open globally to everyone who satisfies the rules for participation. Each competition ranks the benchmark submissions based on a competition method defined in \sref{sec:competitions}.
\section{Official \toolname{} Competitions}
\label{sec:competitions:overview}
Currently, \toolname{} defines two official competitions: (1)~the Global LDBC Competition and (2)~the Global \toolname{} Competition.
\subsection{The Global LDBC Competition} \label{sec:competitions:ldbc}
The Global LDBC Competition is maintained by LDBC, in particular by the Graphalytics Task Force. By the rules of the LDBC charter~\cite{ldbc_byelaws}, the competition method follows the single value-of-merit approach described in \sref{sec:competitions:single_value}, and focuses on two primary metrics: {\bf ``performance''} and {\bf ``cost-performance''}.
The competition reports the following list of metrics:
\begin{enumerate}
\item (informative only) Full disclosure of the ``system under test'' (platform + environment).
\item (informative only) {\it Target scale} of the benchmark.
\item (informative only) {\it Date} of the benchmark execution.
\item (flagship value-of-merit) {\it Performance metric}, as summarized from ``EVPS'' of all benchmarked jobs.
\item (capability value-of-merit) {\it Cost-performance metric}, as summarized from ``PPP'' of all benchmarked jobs.
\item (informative only) Three {\it performance metrics}, as summarized from $T_l$, $T_m$, and $T_p$ respectively.
\end{enumerate}
\begin{description}
\item[Maintained by:] LDBC, \url{ldbcouncil.org}.
\item[Audience:] The LDBC Competition accepts submissions from a global audience.
\end{description}
\futureinversion{2.0}{Ratio metrics (scalability) and additional energy-related metrics will also be considered.}
\subsection{The Global \toolname{} Competition} \label{sec:competitions:graphalytics}
The Global \toolname{} Competition is maintained by the \toolname team. The competition method follows the tournament-based approach described in \sref{sec:competitions:tournament}, and focuses on two primary scores: {\bf ``performance''} and {\bf ``cost-performance''}.
The Global \toolname{} Competition consists of a number of {\it matches}, where each match represents a type of experiment that focuses on a specific performance characteristic that is common across all systems, for example, the EVPS of the BFS algorithm on a Datagen dataset. Each match consists of a set of instances, and the {\bf tournament score} of each system is the sum of the {\bf instance scores} it accumulates across all matches in which it participates. Each {\it instance} is a head-to-head comparison between two systems on one criterion, for example, comparing the EVPS of an algorithm-dataset combination for the pair (Giraph, GraphX): the winner receives 1 point, the loser 0 points, and a draw rewards each platform with 0.5 points.
\begin{enumerate}
\item (informative only) Full disclosure of the ``system under test'' (platform + environment).
\item (informative only) {\it Target scale} of the benchmark.
\item (informative only) {\it Date} of the benchmark execution.
\item (ranking) {\it Performance score}, by comparing pair-wisely ``EVPS'' of all benchmarked jobs.
\item (ranking) {\it Cost-performance score}, by comparing pair-wisely ``PPP'' of all benchmarked jobs.
\end{enumerate}
\begin{description}
\item[Maintained by:] \toolname, \url{graphalytics.org}.
\item[Audience:] The Global \toolname{} Competition accepts submissions from a global audience.
\end{description}
\futureinversion{2.0}{Ratio metrics (scalability) and additional energy-related metrics will also be considered.}
\section{Competition Method} \label{sec:competitions}
Different competition methods have been developed for performance comparison in many application domains. Comparing multiple platforms across multiple performance metrics is not trivial. Two major approaches exist for this task: (i) creating a compound metric, typically by weighting the multiple metrics, and comparing multiple platforms using only this single value-of-merit, and (ii) using a tournament format that allows multiple participants (platforms) to be compared across multiple criteria.
The former requires metrics that are easy to compare and compose, that is, normalized, similarly distributed, agreeing on the direction of improvement (e.g., lower values are better), and of importance universally recognized across the field of practice so that weights can be easily ascribed. The latter requires a good tournament format, which does not favor any of the participants, and which does not make participation cumbersome through a large set of rules.
\subsection{Single Value-of-merit Approach} \label{sec:competitions:single_value}
Where metrics (see \sref{sec:def:metrics}) are collected repeatedly, e.g., for each combination of algorithm and dataset, a single value-of-merit can be summarized following the typical processes of benchmarking HPC systems~\cite{DBLP:conf/sc/HoeflerB15} (a code sketch is given after the list):
\begin{itemize}
\item For \textbf{Performance metrics,} the \emph{arithmetic mean} across all data.
\item For \textbf{Throughput metrics,} because they are rate metrics, in two consecutive steps:
\begin{enumerate}
\item let $a$ be the \emph{arithmetic mean} of the performance metric (e.g., processing time) and $w$ be the (constant, total) workload (e.g., count of edges plus vertices),
\item report the \emph{ratio} between $w$ and $a$ as the throughput metric.
\end{enumerate}
In other words, instead of averaging the rate per sample, that is, $\textit{EVPS}_i$ for sample $i$, \toolname{} first averages the performance metric and then reports the rate. \futureinversion{2.0}{Maybe change ``rate'' to ``ratio'' in this sentence [Gabor]}
\item For \textbf{Cost metrics,} the \emph{harmonic mean} across all data. This is because the denominator (e.g., EVPS for PPP) gives meaning to the ratio (TCO is constant across experiments with the same System Under Test), which indicates that the arithmetic mean would be misleading~\cite[S.3.1.1]{DBLP:conf/sc/HoeflerB15}.
\item For \textbf{Ratio metrics} such as Speedup, the \emph{geometric mean} across all data.
\end{itemize}
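As an illustration only, the following Python sketch applies these summarization rules to per-job measurements; the function name and the input layout (one value per benchmarked job) are assumptions for the example and are not part of the \toolname{} specification.
\begin{verbatim}
# Sketch: summarizing per-job measurements into single values-of-merit.
from statistics import mean, harmonic_mean

def summarize(processing_times, workload, costs, speedups):
    # Performance metrics: arithmetic mean across all data.
    performance = mean(processing_times)
    # Throughput metrics: average the performance metric first, then report the ratio.
    throughput = workload / performance      # e.g., (edges + vertices) / mean time
    # Cost metrics (e.g., PPP): harmonic mean, since the denominator carries the meaning.
    cost = harmonic_mean(costs)
    # Ratio metrics (e.g., speedup): geometric mean.
    ratio = 1.0
    for s in speedups:
        ratio *= s
    ratio = ratio ** (1.0 / len(speedups))
    return performance, throughput, cost, ratio
\end{verbatim}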
\subsection{Tournament-based Approach} \label{sec:competitions:tournament}
In a tournament-based approach, the system performance is ranked by means of competitive tournaments~\cite{Thurstone1927}. Generally, a Round-Robin pair-wise tournament~\cite{David1960} (from hereon, {\it tournament}) of $p$ participants involves a balanced set of (pair-wise) comparisons between the results of each pair of participants; if there are $c$ criteria to compare the participants, there will be $\frac{1}{2} \times c \times p (p - 1)$ pair-wise comparisons. In a pair-wise comparison, a pre-defined amount of points (often, 1 or 3) is given to the better ({\it winner}) participant from the pair. It is also common to give zero points to the worse ({\it loser}) participant from the pair, and to split the points between participants with equal performance. Similar tournaments have been used for decades in chess competitions, in professional sports leagues such as (European and American) football, etc.
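The following Python sketch illustrates the scoring scheme of such a round-robin pair-wise tournament; the data layout (one dictionary of measured values per match) and the submission names in the example are purely illustrative.
\begin{verbatim}
# Sketch: round-robin pair-wise tournament scoring (1 / 0.5 / 0 points).
from itertools import combinations
from collections import defaultdict

def tournament_scores(results, higher_is_better=True):
    scores = defaultdict(float)
    for match, values in results.items():        # one match per experiment type
        for a, b in combinations(values, 2):     # p(p-1)/2 comparisons per match
            if values[a] == values[b]:
                scores[a] += 0.5                 # draw: split the point
                scores[b] += 0.5
            else:
                better = a if (values[a] > values[b]) == higher_is_better else b
                scores[better] += 1.0            # winner gets 1 point, loser 0
    return dict(scores)

# Example: EVPS of BFS on a Datagen dataset for three hypothetical submissions.
print(tournament_scores({"BFS-Datagen": {"A": 1.2e6, "B": 0.8e6, "C": 1.5e6}}))
\end{verbatim}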
We do not consider here other pair-wise tournaments, such as replicated tournaments~\cite{David1960} and unbalanced comparisons~\cite{david1987ranking}, which have been used especially in settings where comparisons are made by human referees and are typically discretized on 5-point Likert scales, and thus are quantitatively less accurate than the \toolname{} measurements.
| {
"alphanum_fraction": 0.7762992719,
"avg_line_length": 75.8666666667,
"ext": "tex",
"hexsha": "4390c201bedef78473a6c193895b2cf16e72ec18",
"lang": "TeX",
"max_forks_count": 6,
"max_forks_repo_forks_event_max_datetime": "2022-03-24T19:58:13.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-06-14T18:10:29.000Z",
"max_forks_repo_head_hexsha": "991dc12df003b5782b768237478f2d44a677dd4f",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "tudelft-atlarge/graphalytics_docs",
"max_forks_repo_path": "tex/competitions.tex",
"max_issues_count": 10,
"max_issues_repo_head_hexsha": "991dc12df003b5782b768237478f2d44a677dd4f",
"max_issues_repo_issues_event_max_datetime": "2019-12-03T12:24:11.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-06-14T18:55:26.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "tudelft-atlarge/graphalytics_docs",
"max_issues_repo_path": "tex/competitions.tex",
"max_line_length": 919,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "991dc12df003b5782b768237478f2d44a677dd4f",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "tudelft-atlarge/graphalytics_docs",
"max_stars_repo_path": "tex/competitions.tex",
"max_stars_repo_stars_event_max_datetime": "2021-01-25T14:29:19.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-01-25T14:29:19.000Z",
"num_tokens": 1871,
"size": 7966
} |
% Reference Card for AUCTeX version 12.2
%**start of header
\newcount\columnsperpage
% This file has only been checked with 3 columns per page. But it
% should print fine either via DVI or PDFTeX.
\columnsperpage=3
% Papersize stuff. Use default paper size for PDF, but switch
% orientation. Use papersize special for dvips.
\ifx\pdfoutput\undefined
\csname newcount\endcsname\pdfoutput
\pdfoutput=0
\fi
\ifnum\pdfoutput=0
% \special{papersize 8.5in,11in}%
\special{papersize 297mm,210mm}%
\else
\dimen0\pdfpagewidth
\pdfpagewidth\pdfpageheight
\pdfpageheight\dimen0
\fi
% This file is intended to be processed by plain TeX (TeX82).
% compile-command: "tex tex-ref" or "pdftex tex-ref"
%
% Original author of Auc-TeX Reference Card:
%
% Terrence Brannon, PO Box 5027, Bethlehem, PA 18015 , USA
% internet: [email protected] (215) 758-1720 (215) 758-2104
%
% Kresten Krab Thorup updated the reference card to 6.
% Per Abrahamsen updated the reference card to 7, 8, and 9.
% Ralf Angeli updated it to 11.50.
% And David Kastrup messed around with it, too, merging the math reference.
%
% Thanks to Stephen Gildea
% Paul Rubin, Bob Chassell, Len Tower, and Richard Mlynarik
% for creating the GNU Emacs Reference Card from which this was mutated
\def\versionnumber{12.2}
\def\year{2019}
\def\version{October \year\ v\versionnumber}
\def\shortcopyrightnotice{\vskip 1ex plus 2 fill
\centerline{\small \copyright\ \year\ Free Software Foundation, Inc.
Permissions on back. v\versionnumber}}
\def\copyrightnotice{%
\vskip 1ex plus 2 fill\begingroup\small
\centerline{Copyright \copyright\ 1987, 1992-1994, 2004-2006, 2008, 2010,}
\centerline{2012, 2014-2017, 2019 Free Software Foundation, Inc.}
\centerline{for AUC\TeX\ version \versionnumber}
Permission is granted to make and distribute copies of
this card provided the copyright notice and this permission notice
are preserved on all copies.
\endgroup}
% make \bye not \outer so that the \def\bye in the \else clause below
% can be scanned without complaint.
\def\bye{\par\vfill\supereject\end}
\newdimen\intercolumnskip
\newbox\columna
\newbox\columnb
\edef\ncolumns{\the\columnsperpage}
\message{[\ncolumns\space
column\if 1\ncolumns\else s\fi\space per page]}
\def\scaledmag#1{ scaled \magstep #1}
% This multi-way format was designed by Stephen Gildea
% October 1986.
\if 1\ncolumns
\hsize 4in
\vsize 10in
\voffset -.7in
\font\titlefont=\fontname\tenbf \scaledmag3
\font\headingfont=\fontname\tenbf \scaledmag2
\font\smallfont=\fontname\sevenrm
\font\smallsy=\fontname\sevensy
\footline{\hss\folio}
\def\makefootline{\baselineskip10pt\hsize6.5in\line{\the\footline}}
\else
\hsize 3.2in
\vsize 7.6in
\hoffset -.75in
\voffset -.8in
\font\titlefont=cmbx10 \scaledmag2
\font\headingfont=cmbx10 \scaledmag1
\font\smallfont=cmr6
\font\smallsy=cmsy6
\font\eightrm=cmr8
\font\eightbf=cmbx8
\font\eightit=cmti8
\font\eighttt=cmtt8
\font\eightsl=cmsl8
\font\eightsc=cmcsc8
\font\eightsy=cmsy8
\textfont0=\eightrm
\textfont2=\eightsy
\def\rm{\fam0 \eightrm}
\def\bf{\eightbf}
\def\it{\eightit}
\def\tt{\eighttt}
\def\sl{\eightsl}
\def\sc{\eightsc}
\normalbaselineskip=.8\normalbaselineskip
\ht\strutbox.8\ht\strutbox
\dp\strutbox.8\dp\strutbox
\normallineskip=.8\normallineskip
\normallineskiplimit=.8\normallineskiplimit
\normalbaselines\rm %make definitions take effect
\if 2\ncolumns
\let\maxcolumn=b
\footline{\hss\rm\folio\hss}
\def\makefootline{\vskip 2in \hsize=6.86in\line{\the\footline}}
\else \if 3\ncolumns
\let\maxcolumn=c
\nopagenumbers
\else
\errhelp{You must set \columnsperpage equal to 1, 2, or 3.}
\errmessage{Illegal number of columns per page}
\fi\fi
\intercolumnskip=.46in
\def\abc{a}
\output={%
% This next line is useful when designing the layout.
%\immediate\write16{Column \folio\abc\space starts with \firstmark}
\if \maxcolumn\abc \multicolumnformat \global\def\abc{a}
\else\if a\abc
\global\setbox\columna\columnbox \global\def\abc{b}
%% in case we never use \columnb (two-column mode)
\global\setbox\columnb\hbox to -\intercolumnskip{}
\else
\global\setbox\columnb\columnbox \global\def\abc{c}\fi\fi}
\def\multicolumnformat{\shipout\vbox{\makeheadline
\hbox{\box\columna\hskip\intercolumnskip
\box\columnb\hskip\intercolumnskip\columnbox}
\makefootline}\advancepageno}
\def\columnbox{\leftline{\pagebody}}
\def\bye{\par\vfill\supereject
\if a\abc \else\null\vfill\eject\fi
\if a\abc \else\null\vfill\eject\fi
\end}
\fi
% we won't be using math mode much, so redefine some of the characters
% we might want to talk about
\catcode`\^=12
\catcode`\_=12
\chardef\\=`\\
\chardef\{=`\{
\chardef\}=`\}
\hyphenation{mini-buf-fer}
\parindent 0pt
\parskip 1ex plus .5ex minus .5ex
\def\small{\smallfont\textfont2=\smallsy\baselineskip=.8\baselineskip}
\def\newcolumn{\vfill\eject}
\def\title#1{{\titlefont\centerline{#1}}\vskip 1ex plus .5ex}
\def\section#1{\par\vskip 0pt plus 0.2\vsize \penalty-3000
\vskip 0pt plus -0.2\vsize
\vskip 3ex plus 2ex minus 2ex {\headingfont #1}\mark{#1}%
\vskip 2ex plus 1ex minus 1.5ex}
\newdimen\keyindent
\def\beginindentedkeys{\keyindent=1em}
\def\endindentedkeys{\keyindent=0em}
\endindentedkeys
\def\paralign{\vskip\parskip\halign}
\def\<#1>{$\langle${\rm #1}$\rangle$}
\def\kbd#1{{\tt#1}\null} %\null so not an abbrev even if period follows
\def\beginexample{\par\leavevmode\begingroup
\obeylines\obeyspaces\parskip0pt\tt}
{\obeyspaces\global\let =\ }
\def\endexample{\endgroup}
\def\key#1#2{\leavevmode\hbox to \hsize{\vtop
{\hsize=.68\hsize\rightskip=1em
\hskip\keyindent\relax#1}\kbd{#2}\hfil}}
\newbox\metaxbox
\setbox\metaxbox\hbox{\kbd{M-x }}
\newdimen\metaxwidth
\metaxwidth=\wd\metaxbox
\def\metax#1#2{\leavevmode\hbox to \hsize{\hbox to .75\hsize
{\hskip\keyindent\relax#1\hfil}%
\hskip -\metaxwidth minus 1fil
\kbd{#2}\hfil}}
\def\threecol#1#2#3{\hskip\keyindent\relax#1\hfil&\kbd{#2}\quad
&\kbd{#3}\quad\cr}
\def\LaTeX{%
L\kern-.36em\raise.3ex\hbox{\sc{a}}\kern-.15em\TeX}
%**end of header
\title{AUC\TeX\ Reference Card}
\centerline{(for version \versionnumber)}
\section{Conventions Used}
\key{Carriage Return or \kbd{C-m}}{RET}
\key{Tabular or \kbd{C-i}}{TAB}
\key{Linefeed or \kbd{C-j}}{LFD}
\section{Shell Interaction}
\key{Run a command on the master file}{C-c C-c}
\key{Run a command on the buffer}{C-c C-b}
\key{Run a command on the region}{C-c C-r}
\key{Fix the region}{C-c C-t C-r}
\key{Kill job}{C-c C-k}
\key{Recenter output buffer}{C-c C-l}
\key{Next error in \TeX/\LaTeX\ session}{C-c `}
\key{Previous error in \TeX/\LaTeX\ session}{M-g p}
\key{Toggle debug of bad boxes}{C-c C-t C-b}
\key{Toggle debug of warnings}{C-c C-t C-w}
\key{View output file}{C-c C-v}
\key{Compile all and view output file}{C-c C-a}
Commands you can run on the master file (with \kbd{C-c C-c}) or the
region (with \kbd{C-c C-r}) include the following (starred versions
are not available in all modes):
\def\star{\llap{\rm*}}
\key{\TeX}{\star TeX}
\key{\LaTeX}{\star LaTeX}
\key{Con\TeX{}t (once)}{\star ConTeXt}
\key{Con\TeX{}t Full}{\star ConTeXt Full}
\key{Makeinfo}{\star Makeinfo}
\key{Makeinfo with HTML output}{\star Makeinfo HTML}
\key{Appropriate previewer}{View}
\key{Print the output}{Print}
\key{Bib\TeX}{BibTeX}
\key{Biber}{Biber}
\key{MakeIndex}{Index}
\key{LaCheck}{Check}
\key{Make (PostScript) File}{File}
\key{Ispell}{Spell}
\key{Delete intermediate files}{Clean}
\key{Delete all output files}{Clean All}
\section{\TeX ing options}
\TeX\ runs can come in various types, which may be toggled and are
indicated in the mode line.
\key{PDF/DVI mode}{C-c C-t C-p}
\key{Stop on errors (Interactive mode)}{C-c C-t C-i}
\key{I/O correlation (S. Specials, Sync\TeX)}{C-c C-t C-s}
\section{Miscellaneous}
\key{Read AUC\TeX\ manual}{C-c TAB}
\key{Find documentation}{C-c ?}
\key{Math Mode}{C-c \string~}
\key{Reset Buffer}{C-c C-n}
\key{Reset AUC\TeX}{C-u C-c C-n}
\section{Multifile Handling}
\key{Save Document}{C-c C-d}
\key{Switch to master file or active buffer}{C-c ^}
\key{Query for a master file}{C-c \_}
\section{Command Insertion}
\key{Insert Section}{C-c C-s}
\key{Insert \LaTeX\ environment}{C-c C-e}
\key{Insert item}{C-c LFD}
\key{Insert item (alias)}{M-RET}
\key{Close \LaTeX\ environment}{C-c ]}
\key{Insert \TeX\ macro \kbd{\{\}} }{C-c C-m}
\key{Insert double brace}{C-c \{}
\key{Complete \TeX\ macro}{M-TAB}
\key{Smart ``quote''}{"}
\key{Smart ``dollar''}{\$}
\section{Font Selection}
\key{Insert {\bf bold\/} text}{C-c C-f C-b}
\key{Insert {\it italics\/} text}{C-c C-f C-i}
\key{Insert {\rm roman} text}{C-c C-f C-r}
\key{Insert {\it emphasized\/} text}{C-c C-f C-e}
\key{Insert {\tt typewriter\/} text}{C-c C-f C-t}
\key{Insert {\sl slanted\/} text}{C-c C-f C-s}
\key{Insert {\sc Small Caps\/} text}{C-c C-f C-c}
\key{Delete font}{C-c C-f C-d}
\key{Replace font}{C-u C-c C-f \<key>}
\section{Source Formatting}
\key{Indent current line}{TAB}
\key{Indent next line}{LFD}
\key{Format a paragraph}{M-q}
\key{Format a region}{C-c C-q C-r}
\key{Format a section}{C-c C-q C-s}
\key{Format an environment}{C-c C-q C-e}
\key{Mark an environment}{C-c .}
\key{Mark a section}{C-c *}
\key{Comment or uncomment region}{C-c ;}
\key{Comment or uncomment paragraph}{C-c \%}
\copyrightnotice
\newcolumn
\title{Math Mode}
\section{Variables}
All math mode commands are under the prefix key specified by
LaTeX-math-abbrev-prefix, default is "`".
You can define your own math mode commands by setting the variable
LaTeX-math-list before loading LaTeX-math-mode.
\section{Greek Letters}
\def\disp#1{\hbox to 6ex{$#1$\hfill}}
\def\twocol#1\par{{%
\def\key##1##2{##1&##2\cr}%
\setbox0\vbox{\halign to 0.45\hsize{\tabskip0ptplus1fil\relax
##\hfil&\kbd{##}\hfil\cr\vrule width0ptheight\ht\strutbox#1}}%
\line{%
\splittopskip=\ht\strutbox
\dimen0\ht0
\advance\dimen0\baselineskip
\setbox2\vsplit0to0.5\dimen0
\vtop{\unvbox2}\hfill\raise \ht\strutbox \vtop {\unvbox0}}}}
\def\keycs#1#2#{\keycsii#1{#2}}
\def\keycsii#1#2#3{\key{\disp{#1#2} ({\tt\string#1})}{#3}}
\twocol
\keycs\alpha{a}
\keycs\beta{b}
\keycs\gamma{g}
\keycs\delta{d}
\keycs\epsilon{e}
\keycs\zeta{z}
\keycs\eta{h}
\keycs\theta{j}
\keycs\kappa{k}
\keycs\lambda{l}
\keycs\mu{m}
\keycs\nu{n}
\keycs\xi{x}
\keycs\pi{p}
\keycs\rho{r}
\keycs\sigma{s}
\keycs\tau{t}
\keycs\upsilon{u}
\keycs\phi{f}
\keycs\chi{q}
\keycs\psi{y}
\keycs\omega{w}
\keycs\Delta{D}
\keycs\Gamma{G}
\keycs\Theta{J}
\keycs\Lambda{L}
\keycs\Xi{X}
\keycs\Pi{P}
\keycs\Sigma{S}
\keycs\Upsilon{U}
\keycs\Phi{F}
\keycs\Psi{Y}
\keycs\Omega{W}
\section{Symbols}
\twocol
\keycs\rightarrow{C-f}
\keycs\leftarrow{C-b}
\keycs\uparrow{C-p}
\keycs\downarrow{C-n}
\keycs\leq{<}
\keycs\geq{>}
\keycs\tilde x{\string~}
\keycs\hat x{^}
\keycs\nabla{N}
\keycs\infty{I}
\keycs\forall{A}
\keycs\exists{E}
\keycs\not \ {/}
\keycs\in{i}
\keycs\times{*}
\keycs\cdot{.}
\keycs\colon{:}
\keycs\subset{\{}
\keycs\supset{\}}
\keycs\subseteq{[}
\keycs\supseteq{]}
\keycs\emptyset{0}
\keycs\setminus{\\}
\keycs\cup{+}
\keycs\cap{-}
\keycs\langle{(}
\keycs\rangle{)}
\keycs\exp{C-e}
\keycs\sin{C-s}
\keycs\cos{C-c}
\keycs\sup{C-^}
\keycs\inf{C-_}
\keycs\det{C-d}
\keycs\lim{C-l}
\keycs\tan{C-t}
\keycs\vee{|}
\keycs\wedge{\&}
\section{Miscellaneous}
\key{cal letters}{c \<letter>}
\newcolumn
\def\previewlatex{{preview-latex}}
\title{\previewlatex}
\section{Activation}
\previewlatex\ is part of AUC\TeX. If it is active, you should see an
entry ``Preview'' in the menu bar when editing \LaTeX{} files. If you
have a ``LaTeX'', but no ``Preview'' menu, add the following to your
{\tt .emacs} file:
\beginexample
(load "preview-latex.el" nil t t)
\endexample
\section{Usage and keybindings}
\previewlatex\ operation only affects the display of the buffer, not
its contents. It runs only on demand, using the target {\sc dvi} or
{\sc PDF} files in the process. The first command in the following
list (also on the toolbar button) will (as applicable) repreview an
active region or a single modified preview, toggle the visibility of
an unmodified preview or generate previews for a surrounding buffer
area up to the next preview.
\key{Preview at point}{C-c C-p C-p}
\key{Preview environment}{C-c C-p C-e}
\key{Preview region}{C-c C-p C-r}
\key{Preview buffer}{C-c C-p C-b}
\key{Preview document}{C-c C-p C-d}
\key{Remove previews at point}{C-c C-p C-c C-p}
\key{Remove previews from region}{C-c C-p C-c C-r}
\key{Remove previews from buffer}{C-c C-p C-c C-b}
\key{Remove previews from document}{C-c C-p C-c C-d}
\key{Cache preamble}{C-c C-p C-f}
\key{Switch off preamble cache}{C-c C-p C-c C-f}
\key{Read Texinfo manual}{C-c C-p TAB}
\key{Copy region as MML}{C-c C-p C-w}
The last keysequence will copy a region with previews into the kill
buffer in a form fit for sending in Emacs' message-mode.
\section{Customization within Emacs}
You can use \kbd{M-x customize-variable RET} or the ``Preview\slash
Customize'' menu for customization. Worthwhile settings:
\halign to \hsize{\tabskip=1ptplus1fil\relax#\hfil&\hfil\kbd{#}\tabskip0pt\cr
\noalign{\medskip If you have dvipng available:}
Set to \kbd{dvipng}&preview-image-type\cr
\noalign{\medskip \vbox{Keep counter values when regenerating
single previews:}}
Set to \kbd{t}&preview-preserve-counters\cr
\noalign{\medskip \vbox{Cache/Don't cache preamble without query
(preamble caching is done using {\tt mylatex.ltx} and might not always
work. Use the appropriate key sequences for overriding the following
setting):}}
Set to \kbd{t}/\kbd{nil}&preview-auto-cache-preamble\cr}
\section{Customization from \LaTeX{}}
Customization is done in the document preamble (you need to load {\tt
preview.sty} explicitly) or in {\tt prauctex.cfg} (which should load
the system {\tt prauctex.cfg} first). Commands:
\halign to \hsize{\tabskip=1ptplus1fil\relax#\hfil&\hfil\kbd{#}\tabskip0pt\cr
Preview macro&\string\PreviewMacro[\{\<args>\}]\{\<macro>\}\cr
Preview env&\string\PreviewEnvironment[\{\<args>\}]\{\<env>\}\cr
Skip macro&\string\PreviewMacro*[\{\<args>\}]\{\<macro>\}\cr
Skip env&\string\PreviewEnvironment*[\{\<args>\}]\{\<env>\}\cr
\noalign{\smallskip Diverting material from float environments}
Snarf stuff&\string\PreviewSnarfEnvironment[\{\<args>\}]\{\<env>\}\cr
}
Values to be used within \<args>:
\halign to
\hsize{\tabskip=1ptplus1fil\relax#\hfil&\hfil\kbd{#}\tabskip0pt\cr
Optional argument&[]\cr
Mandatory argument&\{\}\cr
Optional star&*\cr
Conditionals&?\<token>\{\<if found>\}\{\<if not found>\}\cr
Skip next token&-\cr
Transformation&\#\{\<macro args>\}\{\<replacement>\}\cr
}
More options and explanations can be found in {\tt preview.dvi} or the
Texinfo manual.
\vskip 5ex plus 6ex minus 1ex
\title{Folding Source Display}
\key{Toggle folding mode}{C-c C-o C-f}
\key{Hide all items in buffer}{C-c C-o C-b}
\key{Hide all items in region}{C-c C-o C-r}
\key{Hide all items in paragraph}{C-c C-o C-p}
\key{Hide current macro}{C-c C-o C-m}
\key{Hide current environment}{C-c C-o C-e}
\key{Show all items in buffer}{C-c C-o b}
\key{Show all items in region}{C-c C-o r}
\key{Show all items in paragraph}{C-c C-o p}
\key{Show current item}{C-c C-o i}
\key{Hide or show current item}{C-c C-o C-o}
\vskip 5ex plus 6ex minus 1ex
\title{Outlining \TeX\ Documents}
AUC\TeX\ supports outline mode by defining section, subsection,
etc. as heading levels. You can use \kbd{M-x outline-minor-mode RET}
to toggle outline minor mode. All outline minor mode commands are
under the prefix key specified by outline-minor-mode-prefix, default
is ``C-c @''.
\key{Hide all of buffer except headings}{C-c @ C-t}
\key{Show all text in buffer}{C-c @ C-a}
\key{Hide body following this heading}{C-c @ C-c}
\key{Show body following this heading}{C-c @ C-e}
\key{Hide subtree}{C-c @ C-d}
\key{Show subtree}{C-c @ C-s}
\key{All subheadings visible}{C-c @ C-k}
\key{next visible heading}{C-c @ C-n}
\key{previous visible heading}{C-c @ C-p}
\key{forward to next subheading}{C-c @ C-f}
\key{backward to next subheading}{C-c @ C-b}
\key{up one heading level}{C-c @ C-u}
\newcolumn
\iftrue % RefTeX long version
\title{RefTeX}
\section{Activation}
RefTeX is part of [X]Emacs. To activate and make it interact with
AUCTeX, insert the following lines in .emacs.
\vskip-3mm
\beginexample
(add-hook 'LaTeX-mode-hook 'turn-on-reftex)
(setq reftex-plug-into-AUCTeX t)
\endexample
\section{Table of Contents}
The table of contents is a structured view of the entire document. It
contains the sections, and optionally labels, index entries, and file
boundaries.
\key{Show the table of contents$\sp1$}{C-c =}
\key{Recenter *toc* buffer to here$\sp1$}{C-c -}
\section{Crossreferences, Citations, Index}
\key{Insert unique label$\sp1$}{C-c (}
\key{Reference a label$\sp1$}{C-c )}
\key{Insert citation with key selection}{C-c [}
\key{\dots\ prompt for optional arguments}{C-u C-c [}
\key{Index word at point with default macro}{C-c /}
\key{Insert an index entry}{C-c <}
\key{Add word to index phrases}{C-c \\}
\key{Visit index phrases buffer}{C-c |}
\key{Compile and display index}{C-c >}
\key{View cross reference$\sp1$}{C-c \&}
\key{View cross reference with mouse}{S-mouse-2}
\key{View cross reference from BibTeX file}{C-c \&}
\section{Standard keys in special buffers}
RefTeX's special buffers have many active keys. The common ones are:
\key{Display summary of active keys}{?}
\key{Select this item}{RET}
\key{Rescan the document}{r}
\key{Display location in other window}{SPC}
\key{Follow mode}{f}
\section{Multifile actions}
Since RefTeX scans the entire (multifile) document, it can provide
commands that act on all files of a document. Check the {\tt
Ref->Global Actions} menu for these commands.
\section{Variables}
To tell reftex about your own macro definitions, customize the
variables
\vskip-3mm
\beginexample
reftex-label-alist
reftex-section-levels
reftex-index-macros
reftex-cite-format
\endexample
\vskip2mm\hrule
$\sp1$ An argument of \kbd{C-u} triggers a document scan first. This can
be necessary if file content and RefTeX's knowledge are no longer
consistent.
\else % RefTeX compact version
\csname title\endcsname{RefTeX}
\section{Activation in .emacs}
\vskip-4mm
\beginexample
(add-hook 'LaTeX-mode-hook 'turn-on-reftex)
(setq reftex-plug-into-auctex t)
\endexample
\section{Table of Contents}
\key{Show the table of contents$\sp1$}{C-c =}
\key{Recenter *toc* buffer to here$\sp1$}{C-c -}
\section{Crossreferences, Citations, Index}
\key{Insert unique label$\sp1$}{C-c (}
\key{Reference a label$\sp1$}{C-c )}
\key{Insert citation with key selection}{C-c [}
\key{... prompt for optional arguments}{C-u C-c [}
\key{Index word at point with default macro}{C-c /}
\key{Insert an index entry}{C-c <}
\key{Add word to index phrases}{C-c \\}
\key{Visit index phrases buffer}{C-c |}
\key{Compile and display index}{C-c >}
\key{View cross reference$\sp1$}{C-c \&}
\key{View cross reference with mouse}{S-mouse-2}
\key{View cross reference from BibTeX file}{C-c \&}
\vskip2mm\hrule
$\sp1$ An argument of \kbd{C-u} triggers a document scan first.
\fi
\bye
%%% Local Variables:
%%% mode: plain-TeX
%%% TeX-master: t
%%% End:
| {
"alphanum_fraction": 0.7089668011,
"avg_line_length": 28.3548387097,
"ext": "tex",
"hexsha": "b022df9c1bbe3a67c30ba8cecfe765cb4889c6c0",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "966a2dce37cef7d447e063ac5c2cd91a4388d2e0",
"max_forks_repo_licenses": [
"Unlicense"
],
"max_forks_repo_name": "oscu0/orpheus",
"max_forks_repo_path": "deployed/home/.emacs.d/elpa/auctex-12.2.0/doc/tex-ref.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "966a2dce37cef7d447e063ac5c2cd91a4388d2e0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Unlicense"
],
"max_issues_repo_name": "oscu0/orpheus",
"max_issues_repo_path": "deployed/home/.emacs.d/elpa/auctex-12.2.0/doc/tex-ref.tex",
"max_line_length": 78,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "966a2dce37cef7d447e063ac5c2cd91a4388d2e0",
"max_stars_repo_licenses": [
"Unlicense"
],
"max_stars_repo_name": "oscu0/orpheus",
"max_stars_repo_path": "deployed/home/.emacs.d/elpa/auctex-12.2.0/doc/tex-ref.tex",
"max_stars_repo_stars_event_max_datetime": "2019-02-22T16:06:03.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-02-22T16:06:03.000Z",
"num_tokens": 6814,
"size": 19338
} |
\chapter{Evaluation and Testing}
In the following sections, we outline how we evaluate and test the suggested models.
Next, we will perform that evaluation and present the final results.
\section{Experimental Setup}
Since our deep models require a large amount of computation, we would like to make use of parallelization.
Hence, all of our experiments that involve deep learning will be run on an \textit{Amazon EC2 p2.xlarge} instance.
This VM has a \textit{NVIDIA K80 GPU} with 12 GiB of GPU memory.
All of the instances used were set up with both \textit{CUDA 8} and \textit{cuDNN v5.1} \cite{tensorflow,nvidia_developer_2017}.
The rest of the experiments are run on a 2016 MacBook Pro, with a 2.9GHz Intel Core i5 and 8GB of RAM, running MacOS 10.12.
To make sure that the same Python environment is used on both of these machines, we consistently use \textit{Python 3.6} and a \textit{virtual environment} for the Python dependencies.
As previously mentioned, the main dataset used is \texttt{GRESCHBACH} but we will also be using some of the data in the \texttt{WANG14} dataset to see how the model performs on data that was recorded under different circumstances.
For both these datasets, we will only be using the preprocessed Tor cells and not the raw TCP traffic data.
Finally, in all of the experiments that are conducted below, we only consider an \textit{open-world scenario}.
This means that the test set will contain both monitored and unmonitored pages that the fingerprint extraction models and the classifiers have never seen before.
For this to work, we train the models on a large set of monitored web pages but also on a small percentage of unmonitored web pages such that the classifiers can distinguish between the two.
\section{Evaluation Techniques}
There are several different manners in which we can evaluate the feature selection models.
First of all, we could analyse how the model performs on unseen traces as it is learning.
If the difference between the training error and the error on unseen instances increases, the model is clearly overfitting.
However, this only shows us how well the model reproduces a trace from a fingerprint, not how well the fingerprints perform in a WF attack.
For this we need to train a classifier and see how well it performs by using the metrics described in section \ref{sec:classifier-training}.
To be able to compare these fingerprints with hand-picked ones, we could train the classifiers with the hand-picked features and with the automatically generated ones.
These hand-picked features are often chosen by experts after a careful analysis.
Hence, if the classifiers with our fingerprints were to achieve similar results or even outperform the classifiers with the hand-picked features, we know that the fingerprint extraction model has been successful.
For these results to be accurate, we do not change any (hyper)parameters within the classifiers.
Thus everything, except for the features, remains the same.
For the classifiers, we pick a small set of four existing models.
We aim to pick models that have had an influence on the WF field whilst also covering a variety of classifier types.
This set includes the two \textit{support vector classifiers} (SVCs) used by Panchenko et al. \cite{panchenko1,panchenko2},
the k-fingerprinting attack, which relies on a \textit{random forest} (RF) used by Hayes et al. \cite{kfingerprinting}
and finally the \textit{k-nearest neighbours} (kNN) classifier used by Wang et al. \cite{wang_cai_johnson_nithyanand_goldberg_2014}.
For all of these models, we extract the exact same features as outlined in the respective papers.
The code for this feature extraction process can be found in the \texttt{feature\_extraction} module.
We also aim to use the exact same hyperparameters described in the respective papers (a sketch of these configurations is given after the list). More specifically:
\begin{itemize}
\item \textbf{SVC} \cite{panchenko1} - a \textit{radial basis function} (RBF) kernel with $C = 2^{17}$ and $\gamma = 2^{-19}$.
\item \textbf{SVC} \cite{panchenko2} - uses the same hyperparameters as in the previous SVC but with different features.
\item \textbf{RF} \cite{kfingerprinting} - a good accuracy/time tradeoff when $k = 3$ and $\textit{num\_trees} = 20$.
\item \textbf{kNN} \cite{wang_cai_johnson_nithyanand_goldberg_2014} - also has a good accuracy/time tradeoff when $k = 2$ and $k_{\textit{reco}} = 5$.
\end{itemize}
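As a rough illustration, the configurations above could be instantiated with scikit-learn as sketched below. Note that this is only an approximation: the original attacks rely on their own implementations and additional steps (for example, Wang et al.'s kNN learns feature weights), so the sketch should not be read as the authors' code.
\begin{verbatim}
# Approximate scikit-learn instantiations of the hyperparameters listed above.
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier

svc = SVC(kernel="rbf", C=2**17, gamma=2**-19)   # Panchenko et al. (svc1 and svc2)
rf = RandomForestClassifier(n_estimators=20)     # k-fingerprinting, num_trees = 20
knn = KNeighborsClassifier(n_neighbors=2)        # Wang et al., k = 2 (without weight learning)
\end{verbatim}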
We do need to note that these parameters have been specifically tuned for the hand-picked features and not for our fingerprints, which might have an impact on the performance.
\section{Evaluation}
As mentioned in section \ref{sec:fingerprint-extraction-training}, for both deep learning models we need to make a couple of design decisions regarding different architectures and learning parameters.
We perform several experiments here to see which ones are the most appropriate.
\subsection{Stacked Autoencoder}
\subsubsection{Learning Parameter Tuning}
First, we start by varying the mini-batch sizes from $20$ to $600$ in steps of $20$ for a simple model with an input layer of $3000$ cells, and two hidden layers with $1600$ and $200$ neurons respectively.
The higher the batch size, the longer it takes before a weight update is made, whilst the lower the batch size, the noisier the weight updates become.
We notice that a batch size of $400$ seems to provide us with a good tradeoff.
Next, we tried a variety of different permutations of loss functions and optimizers and varied the learning rate from $0.01$ to $0.000001$.
These experiments revealed that a \textit{mean squared error} (MSE) loss function with an \textit{RMSProp} optimizer and a $0.01$ learning rate consistently yields the most appropriate results.
Finally, we also use batch normalization for all experiments since our experiments show that it allows the model to converge faster.
\subsubsection{Architecture Tuning}
Our experiments show that a \textit{sigmoid} activation function consistently results in better learning across a variety of hidden layer configurations with different sizes.
The number of hidden layers is a slightly more difficult decision, since we want the simplest network possible that is still able to learn a representation.
Hence, we experiment with networks with a total of $1$ up to $3$ hidden layers.
For each of these, the input layer will consist of $3000$ nodes and we will attempt to extract $200$ features, which means that the sizes of the hidden layers will gradually decrease to $200$ neurons.
\begin{figure}[ht]
\centering
\includegraphics[width=\textwidth]{changing-depth}
\caption{Learning curves for changing the depth of the stacked autoencoder.}
\label{fig:changing-depth}
\end{figure}
Figure \ref{fig:changing-depth} shows us that a network with two hidden layers provides a good complexity/training error tradeoff.
Now that we know the depth of the network, we also need to consider changing the size of the final hidden layer since it represents the amount of features that will be extracted.
The more features we introduce, the more time and data we require to learn the classification task, whilst if the number of features is too low, the classifiers might not be able to learn how to effectively classify any of the web pages.
Hence, we base the size of the final state on the amount of features used in previous WF attacks.
\begin{table}[ht]
\centering
\begin{tabular}{ r r } \hline
\multicolumn{1}{c}{\textbf{Model}} & \multicolumn{1}{c}{\textbf{Features}} \\ \hline
SVC \cite{panchenko1} & $305$ \\
SVC \cite{panchenko2} & $104$ \\
RF \cite{kfingerprinting} & $150$ \\
kNN \cite{wang_cai_johnson_nithyanand_goldberg_2014} & $3737$ \\
\hline
\end{tabular}
\caption{Amount of features for existing attacks.}
\label{table:feature-wf-attacks}
\end{table}
Based on table \ref{table:feature-wf-attacks}, we vary the amount of features between $100$ and $300$ in steps of $50$.
From figure \ref{fig:changing-output-size}, we determine that around $200$ nodes provides us with the best tradeoff.
Therefore, throughout the rest of the report when we refer to a stacked autoencoder, its architecture will consist of $3$ layers with $3000$, $1600$ and $200$ nodes each.
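To make the chosen architecture concrete, the sketch below builds such a $3000$-$1600$-$200$ autoencoder in Keras with the learning parameters selected above (sigmoid activations, batch normalization, an RMSProp optimizer with a $0.01$ learning rate, an MSE loss and mini-batches of $400$). Our actual implementation uses Tensorflow directly, so this Keras version is only an illustrative approximation.
\begin{verbatim}
# Sketch: stacked autoencoder with a 3000-1600-200 encoder (Keras).
from tensorflow.keras import layers, models, optimizers

def build_stacked_autoencoder(input_dim=3000):
    inputs = layers.Input(shape=(input_dim,))
    h = layers.Dense(1600, activation="sigmoid")(inputs)
    h = layers.BatchNormalization()(h)
    fingerprint = layers.Dense(200, activation="sigmoid", name="fingerprint")(h)
    h = layers.Dense(1600, activation="sigmoid")(fingerprint)
    outputs = layers.Dense(input_dim, activation="sigmoid")(h)
    model = models.Model(inputs, outputs)
    model.compile(optimizer=optimizers.RMSprop(learning_rate=0.01), loss="mse")
    return model

# model.fit(traces, traces, batch_size=400, epochs=1) reconstructs the input traces.
\end{verbatim}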
\begin{figure}[ht]
\centering
\includegraphics[width=\textwidth]{changing-output-size}
\caption{Learning curves for changing the size of the middle layer in the stacked autoencoder}
\label{fig:changing-output-size}
\end{figure}
\newpage
\subsection{Sequence-to-Sequence Model}
\subsubsection{Learning Parameter Tuning}
We aim to find appropriate values for the learning parameters using a simple encoder and decoder with LSTM cells and $120$ hidden states.
After experimentation, the maximum batch size that our EC2 instance could handle memory-wise is around $400$.
Thus, throughout the rest of the report, we will use a mini-batch size of $400$.
Next, we vary the learning rate $\gamma$ from $0.01$ to $0.000001$ with various optimizers (\textit{adam}, \textit{gradient descent} or \textit{RMSProp}) and loss functions (\textit{mean squared error (MSE)} or \textit{absolute loss} (AL)).
After trying a wide variety of different permutations, we found that an \textit{adam optimizer} consistently demonstrated better results.
We already expected this since adam optimizers are computationally efficient, require relatively little memory and tend to perform well with problems that have a large amount of parameters \cite{kingma2014adam},
which is ideal since our network can be unrolled to large lengths.
Next, we also note that the best quality of data compression was achieved with a \textit{MSE loss function} and a learning rate of $0.000002$.
Hence, we set $\gamma = 0.000002$, $b = 400$ and use an adam optimizer with a MSE loss function for the rest of our experiments.
Since some of the traces are relatively long, it might be worth cutting them after a certain amount of time.
However, to compare after which time to cut the trace, we cannot simply base our analysis on the learning curve because the shorter the trace, the smaller the error will be.
Therefore, we will cut the traces after $2$, $6$ and $10$ seconds, use these values to train a sequence-to-sequence model and train binary classifiers on the extracted fingerprints.
Next, we can compare the performance of these classifiers to analyse how much information each part of the trace carries.
\begin{figure}[ht]
\centering
\includegraphics[width=\textwidth]{trace-cutting}
\caption{Average performance measures of all classifiers after cutting traces.}
\label{fig:trace-cutting}
\end{figure}
Figure \ref{fig:trace-cutting} shows us that the majority of the information is in fact carried in the first couple of seconds of the trace.
Hence, for the rest of our experiments we will be cutting the traces after $10$ seconds.
Finally, we also use batch normalization for all experiments since it allows the model to converge faster.
\subsubsection{Architecture Tuning}
Now that we have made a decision on which learning parameters to use, we can start changing the architecture of the sequence-to-sequence models to see which ones yield the best results.
\noindent
\textbf{Hidden States}
We first start by examining the amount of hidden states in the network.
These directly affect the size of the fingerprints that will be extracted.
In fact, if we use an LSTM cell, the amount of features extracted is exactly double the amount of hidden states.
Thus, based on table \ref{table:feature-wf-attacks}, we vary the amount of hidden states between $60$ to $140$ in steps of $20$ to see which ones yield the most appropriate results.
For these experiments we train a sequence-to-sequence model with a unidirectional encoder, LSTM cells and without cutting or reversing the traces.
The training data consists of $120,000$ monitored and unmonitored web pages, which are shuffled to avoid overfitting on any specific web page.
We only train the model for one epoch, as we seem to have enough data for the model to converge within that epoch.
Hence, every sample that the model sees in the figure below is one that it has never seen before, so we can easily determine that the model is not overfitting.
\begin{figure}[ht]
\centering
\includegraphics[width=\textwidth]{varying-hidden-states}
\caption{MSE over the amount of traces processed for varying hidden states.}
\label{fig:varying-hidden-states}
\end{figure}
Figure \ref{fig:varying-hidden-states} clearly shows us that the smaller the amount of hidden states, the faster the network seems to learn the reconstruction task.
On the other hand, the higher the amount of states, the lower the final error seems to be.
Since we aim to compromise between computational complexity and the time it takes to train the model, around $100$ hidden states seems to be the most appropriate.
\newpage
\noindent
\textbf{Bidirectional}
For these experiments, we consider a smaller range of hidden state values from $80$ to $120$ in steps of $20$.
Again, for all of these we will be using LSTM cells without cutting or reversing the traces, with all of the learning parameters described above and the exact same training set used in the previous experiment.
\begin{figure}[ht]
\centering
\includegraphics[width=\textwidth]{varying-hidden-states2}
\caption{MSE over the amount of traces processed for varying hidden states for a bidirectional encoder.}
\label{fig:varying-hidden-states2}
\end{figure}
\noindent
As can be seen in figure \ref{fig:varying-hidden-states2}, around $80$ hidden states seems to provide the best complexity/error tradeoff.
\newpage
\noindent
\textbf{LSTM or GRU Cells}
Here, we train a sequence-to-sequence model with both a unidirectional and bidirectional encoder.
These will both have GRU and LSTM cells with $100$ and $80$ hidden states respectively.
Furthermore, we recreate the exact same training conditions as in the previous experiments.
\begin{figure}[ht]
\centering
\includegraphics[width=\textwidth]{GRUcell}
\caption{Learning curves for different cell types.}
\label{fig:varying-cell-type}
\end{figure}
As can be seen in figure \ref{fig:varying-cell-type}, all of the different architectures converge to a similar value.
Although GRU cells seem to converge faster, they are also slightly more unstable especially around batch $80$ to $100$.
The most stable model seems to be the bidirectional encoder with LSTM cells.
Although this specific model has more parameters than the other sequence-to-sequence models, the time difference in training is only minimal.
Hence, throughout the rest of the report when we refer to a sequence-to-sequence model, its architecture consists of a bidirectional encoder with $80$ hidden states.
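The sketch below illustrates this final configuration: a bidirectional LSTM encoder with $80$ hidden states whose output serves as the fingerprint, and an LSTM decoder that reconstructs the (padded) trace. The fixed padded length, the use of Keras and the RepeatVector construction are simplifying assumptions; our implementation uses Tensorflow directly.
\begin{verbatim}
# Sketch: sequence-to-sequence autoencoder with a bidirectional LSTM encoder.
from tensorflow.keras import layers, models, optimizers

def build_seq2seq(max_len=200, n_features=1, hidden=80):
    inputs = layers.Input(shape=(max_len, n_features))
    # Bidirectional encoder; its output is used as the fingerprint.
    fingerprint = layers.Bidirectional(layers.LSTM(hidden))(inputs)
    # Decoder reconstructs the padded trace from the fingerprint.
    repeated = layers.RepeatVector(max_len)(fingerprint)
    decoded = layers.LSTM(hidden, return_sequences=True)(repeated)
    outputs = layers.TimeDistributed(layers.Dense(n_features))(decoded)
    model = models.Model(inputs, outputs)
    model.compile(optimizer=optimizers.Adam(learning_rate=2e-6), loss="mse")
    return model
\end{verbatim}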
\subsection{Classifier Performance}
We have previously analysed the models' performance based on how well they reconstruct the original input from a fingerprint.
But to examine how well our models perform during a real WF attack, we compare their performance on different existing classifiers against the hand-picked features.
This means that we choose a set of existing WF attacks and recreate them.
Next we run the exact same attack but with both the hand-picked and automatically generated features.
Note that our results might be slightly lower than in their respective papers since we do not aim to recreate the full attack.
Rather than optimizing different hyperparameters, we aim to use these classifiers and the hand-picked features as an indicator as to how well the fingerprint extraction models perform.
We expect that the automatically generated features will perform worse than the hand-picked ones due to the complexity of the task.
However, we still hope to show that it is in fact possible to automate this feature selection process to a certain extent.
As mentioned in section \ref{sec:threat-model}, there are two main threat models that we need to consider.
The first one is a binary classification task, where the adversary wants to see whether or not a user is visiting any webpages within a given set.
The other threat model involves an adversary who has a set of monitored pages and wants to know which specific pages within that set the user is visiting.
Hence, it is a multiclass classification problem.
Although there are different techniques for evaluating binary and multiclass classification models, we will only use the scoring statistics outlined in section \ref{sec:classifier-training}.
This allows for easy comparisons between the different threat models.
We do expect that the binary classification models will perform better than the multiclass ones due to the smaller amount of options available.
As mentioned before, we have already selected a total of four different existing attacks.
We will refer to the first SVC attack by Panchenko et al. \cite{panchenko1} as \texttt{svc1} and the second one \cite{panchenko2} as \texttt{svc2}.
We refer to the k-fingerprinting attack by Hayes et al. \cite{kfingerprinting} as \texttt{RF} and, finally, to the attack by Wang et al. \cite{wang_cai_johnson_nithyanand_goldberg_2014} as \texttt{kNN}.
\subsubsection{Binary Classification}
We first start by analysing the simplest threat model, namely binary classification.
For all of the models below, we aim to extract the exact same hand-picked features as were described in the respective papers to the best of our knowledge.
For training these models, we use an extract from the \texttt{GRESCHBACH} dataset with a total of $100$ monitored web pages with $70$ instances each and $5000$ unmonitored web pages.
We then split this set into a training and validation set using a stratified split.
The training set will contain $90\%$ of the monitored web pages whilst we vary the percentage of unmonitored pages to see how the models perform.
After the set is split up into a training and validation set, we perform a \textit{stratified k-fold validation} with $k = 3$ on the training set.
Then finally we train the classifiers on all of the training data and evaluate them on the test set.
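The sketch below summarizes this evaluation protocol using scikit-learn utilities; the variable names and the scoring choice are illustrative, and the handling of the varying unmonitored fraction is simplified.
\begin{verbatim}
# Sketch: stratified split, stratified k-fold validation and final evaluation.
from sklearn.model_selection import train_test_split, StratifiedKFold, cross_validate

def evaluate(classifier, X, y):
    # Stratified split; roughly 90% of the pages end up in the training set.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.1, stratify=y, random_state=0)
    # Stratified k-fold validation with k = 3 on the training set.
    folds = StratifiedKFold(n_splits=3)
    fold_scores = cross_validate(classifier, X_train, y_train,
                                 cv=folds, scoring="f1_macro")
    # Finally, train on all of the training data and evaluate on the held-out set.
    classifier.fit(X_train, y_train)
    return fold_scores["test_score"], classifier.score(X_test, y_test)
\end{verbatim}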
The results for the k-fold validation on the training set for the hand-picked features are outlined in table \ref{table:hand-picked-bin}.
Here, we used a total of $10\%$ of the unmonitored data for training.
As expected, the results with a small amount of unmonitored data are relatively high.
\begin{table}[ht]
\centering
\begin{tabular}{ r r r r r r } \hline
\multicolumn{1}{c}{\textbf{Model}} & \multicolumn{1}{c}{\textbf{Accuracy}} & \multicolumn{1}{c}{\textbf{BDR}} & \multicolumn{1}{c}{\textbf{TPR}} &
\multicolumn{1}{c}{\textbf{FPR}} & \multicolumn{1}{c}{\textbf{F1}} \\ \hline
\texttt{svc1} & $0.91 \pm 0.003$ & $0.99 \pm 0.001$ & $0.97 \pm 0.001$ & $0.07 \pm 0.002$ & $0.90 \pm 0.005$ \\
\texttt{svc2} & $0.91 \pm 0.008$ & $0.99 \pm 0.001$ & $0.95 \pm 0.003$ & $0.06 \pm 0.004$ & $0.90 \pm 0.008$ \\
\texttt{RF} & $0.93 \pm 0.003$ & $0.99 \pm 0.001$ & $0.97 \pm 0.006$ & $0.05 \pm 0.003$ & $0.92 \pm 0.005$ \\
\texttt{kNN} & $0.88 \pm 0.007$ & $0.99 \pm 0.003$ & $0.97 \pm 0.004$ & $0.10 \pm 0.002$ & $0.94 \pm 0.004$ \\
\hline
\end{tabular}
\caption{Performance statistics for the hand-picked features on a binary classification task with k-fold validation whilst training on $10\%$ of the unmonitored pages.}
\label{table:hand-picked-bin}
\end{table}
Next, we analyse the performance of these classifiers with the automatically generated features.
We do note that from here on we refer to both \texttt{svc1} and \texttt{svc2} as \texttt{svc}, since they share the same hyperparameters and only differ in their hand-picked features, so they would obtain the same results on the automatically generated features anyway.
\begin{table}[ht]
\centering
\begin{tabular}{ r r r r r r } \hline
\multicolumn{1}{c}{\textbf{Model}} & \multicolumn{1}{c}{\textbf{Accuracy}} & \multicolumn{1}{c}{\textbf{BDR}} & \multicolumn{1}{c}{\textbf{TPR}} &
\multicolumn{1}{c}{\textbf{FPR}} & \multicolumn{1}{c}{\textbf{F1}} \\ \hline
\texttt{svc} & $0.92 \pm 0.001$ & $0.99 \pm 0.001$ & $0.98 \pm 0.001$ & $0.07 \pm 0.002$ & $0.89 \pm 0.003$ \\
\texttt{RF} & $0.77 \pm 0.012$ & $0.76 \pm 0.004$ & $0.87 \pm 0.009$ & $0.15 \pm 0.004$ & $0.86 \pm 0.007$ \\
\texttt{kNN} & $0.74 \pm 0.010$ & $0.73 \pm 0.009$ & $0.85 \pm 0.004$ & $0.18 \pm 0.007$ & $0.84 \pm 0.009$ \\
\hline
\end{tabular}
\caption{Performance statistics for the autoencoder features on a binary classification task with k-fold validation whilst training on $10\%$ of the unmonitored pages.}
\label{table:ae-bin}
\end{table}
\begin{table}[ht]
\centering
\begin{tabular}{ r r r r r r } \hline
\multicolumn{1}{c}{\textbf{Model}} & \multicolumn{1}{c}{\textbf{Accuracy}} & \multicolumn{1}{c}{\textbf{BDR}} & \multicolumn{1}{c}{\textbf{TPR}} &
\multicolumn{1}{c}{\textbf{FPR}} & \multicolumn{1}{c}{\textbf{F1}} \\ \hline
\texttt{svc} & $0.93 \pm 0.001$ & $0.99 \pm 0.001$ & $0.99 \pm 0.001$ & $0.06 \pm 0.003$ & $0.90 \pm 0.002$ \\
\texttt{RF} & $0.86 \pm 0.004$ & $0.99 \pm 0.001$ & $0.83 \pm 0.003$ & $0.07 \pm 0.005$ & $0.88 \pm 0.003$ \\
\texttt{kNN} & $0.81 \pm 0.008$ & $0.95 \pm 0.007$ & $0.97 \pm 0.007$ & $0.14 \pm 0.012$ & $0.89 \pm 0.009$ \\
\hline
\end{tabular}
\caption{Performance statistics for the sequence-to-sequence features on a binary classification task with k-fold validation whilst training on $10\%$ of the unmonitored pages.}
\label{table:seq2seq-bin}
\end{table}
Both table \ref{table:ae-bin} and \ref{table:seq2seq-bin} show that, with a small amount of unmonitored pages, the \texttt{svc} model performs similarly to its hand-picked counterpart, whereas the performance is slightly lower for both the \texttt{RF} and \texttt{kNN} attacks.
Now we will measure the performance when training the classifiers on the full training set and evaluating them on the validation set, whilst changing the amount of unmonitored pages we train the model on.
Clearly, figure \ref{fig:bin-unmon-performance} shows us that the models suffer if we introduce a large amount of unmonitored pages in the test set.
But the more unmonitored instances we train on, the better the classifiers seem to perform.
Additionally, figure \ref{fig:bin-unmon-performance} also shows that the \texttt{RF} classifier seems to perform best when trained on both the hand-picked and the automatically generated features.
Next, we also note that the hand-picked features currently still get the best overall performance, followed by the sequence-to-sequence features and the autoencoder features.
\begin{figure}[ht]
\centering
\includegraphics[width=\textwidth]{bin-unmon-performance}
\caption{Varying the amount of unmonitored pages trained on for different features.}
\label{fig:bin-unmon-performance}
\end{figure}
\subsubsection{Multiclass Classification}
The multiclass classification scenario is slightly more complex due to the larger array of options.
Hence, we also expect considerably lower results, especially on the test set with a large amount of unmonitored pages.
\begin{table}[ht]
\centering
\begin{tabular}{ r r r r r r } \hline
\multicolumn{1}{c}{\textbf{Model}} & \multicolumn{1}{c}{\textbf{Accuracy}} & \multicolumn{1}{c}{\textbf{BDR}} & \multicolumn{1}{c}{\textbf{TPR}} &
\multicolumn{1}{c}{\textbf{FPR}} & \multicolumn{1}{c}{\textbf{F1}} \\ \hline
\texttt{svc1} & $0.57 \pm 0.013$ & $0.99 \pm 0.001$ & $0.59 \pm 0.014$ & $0.08 \pm 0.004$ & $0.70 \pm 0.012$ \\
\texttt{svc2} & $0.59 \pm 0.007$ & $0.99 \pm 0.001$ & $0.61 \pm 0.007$ & $0.07 \pm 0.007$ & $0.72 \pm 0.009$ \\
\texttt{RF} & $0.59 \pm 0.011$ & $0.99 \pm 0.001$ & $0.58 \pm 0.011$ & $0.02 \pm 0.004$ & $0.72 \pm 0.012$\\
\texttt{kNN} & $0.55 \pm 0.015$ & $0.92 \pm 0.006$ & $0.55 \pm 0.008$ & $0.09 \pm 0.005$ & $0.69 \pm 0.013$ \\
\hline
\end{tabular}
\caption{Performance statistics for the hand-picked features on a multiclass classification task with k-fold validation whilst training on $10\%$ of the unmonitored pages.}
\label{table:mult-handpicked-test-error}
\end{table}
Table \ref{table:mult-handpicked-test-error} shows that the performance does indeed drop on the multiclass classification task.
On the other hand, both table \ref{table:mult-ae-test-error} and \ref{table:mult-seq2seq-test-error} show that the performance for the automatically generated features is even lower than for the hand-picked ones.
\begin{table}[ht]
\centering
\begin{tabular}{ r r r r r r } \hline
\multicolumn{1}{c}{\textbf{Model}} & \multicolumn{1}{c}{\textbf{Accuracy}} & \multicolumn{1}{c}{\textbf{BDR}} & \multicolumn{1}{c}{\textbf{TPR}} &
\multicolumn{1}{c}{\textbf{FPR}} & \multicolumn{1}{c}{\textbf{F1}} \\ \hline
\texttt{svc} & $0.22 \pm 0.003$ & $0.54 \pm 0.002$ & $0.17 \pm 0.002$ & $0.16 \pm 0.004$ & $0.29 \pm 0.004$ \\
\texttt{RF} & $0.25 \pm 0.009$ & $0.62 \pm 0.003$ & $0.18 \pm 0.008$ & $0.13 \pm 0.007$ & $0.30 \pm 0.009$\\
\texttt{kNN} & $0.20 \pm 0.015$ & $0.48 \pm 0.006$ & $0.17 \pm 0.006$ & $0.20 \pm 0.007$ & $0.28 \pm 0.011$ \\
\hline
\end{tabular}
\caption{Performance statistics for the autoencoder features on a multiclass classification task with k-fold validation whilst training on $10\%$ of the unmonitored pages.}
\label{table:mult-ae-test-error}
\end{table}
\begin{table}[!htb]
\centering
\begin{tabular}{ r r r r r r } \hline
\multicolumn{1}{c}{\textbf{Model}} & \multicolumn{1}{c}{\textbf{Accuracy}} & \multicolumn{1}{c}{\textbf{BDR}} & \multicolumn{1}{c}{\textbf{TPR}} &
\multicolumn{1}{c}{\textbf{FPR}} & \multicolumn{1}{c}{\textbf{F1}} \\ \hline
\texttt{svc} & $0.35 \pm 0.004$ & $0.69 \pm 0.008$ & $0.24 \pm 0.003$ & $0.13 \pm 0.014$ & $0.37 \pm 0.002$ \\
\texttt{RF} & $0.39 \pm 0.005$ & $0.83 \pm 0.004$ & $0.27 \pm 0.008$ & $0.07 \pm 0.014$ & $0.42 \pm 0.006$\\
\texttt{kNN} & $0.31 \pm 0.011$ & $0.56 \pm 0.004$ & $0.22 \pm 0.09$ & $0.20 \pm 0.003$ & $0.33 \pm 0.009$ \\
\hline
\end{tabular}
\caption{Performance statistics for the sequence-to-sequence features on a multiclass classification task with k-fold validation whilst training on $10\%$ of the unmonitored pages.}
\label{table:mult-seq2seq-test-error}
\end{table}
\begin{figure}[!htb]
\centering
\includegraphics[width=\textwidth]{mult-unmon-performance}
\caption{Varying the amount of unmonitored pages trained on for different features.}
\label{fig:mult-unmon-performance}
\end{figure}
But the behavior of the classifiers gets even more interesting when training on a larger amount of unmonitored pages.
Figure \ref{fig:mult-unmon-performance} shows that the stacked autoencoder features achieve a similar performance as on the binary classification task, whilst the sequence-to-sequence model performs almost as well as the hand-picked features.
In fact, the sequence-to-sequence features even outperform the hand-picked features on the \texttt{svc}.
\subsubsection{Different Circumstances}
Besides analysing how the fingerprint extraction models perform on data within the same dataset, it would be interesting to examine how they perform on data recorded under different circumstances.
It has already been shown that the performance of the classifiers is greatly impacted by the network, time and the TBB version.
But that does not necessarily mean that our fingerprint extraction models are impacted similarly.
If the deep learning models are not impacted by these factors, an adversary would only need to train the fingerprint extraction model once and could then continue to use it, only retraining the classifiers, which resembles a form of \textit{transfer learning} \cite{transfer_learning}.
To test this premise, we use the models that we previously trained on the same $120,000$ web pages within the \texttt{GRESCHBACH} dataset.
More specifically, an LSTM bidirectional encoder with $80$ hidden states and a stacked autoencoder with layer sizes of $3000$, $1600$ and $200$.
Next, we extract the fingerprints from the Tor cells within the \texttt{WANG14} dataset using this model, train a set of classifiers on these fingerprints using k-fold validation and note down their performance.
For the following experiments, we train the classifiers using $k = 3$ and, for both datasets, we pick a total of $100$ monitored web pages with $70$ instances each and $5000$ unmonitored web pages.
\begin{figure}[ht]
\centering
\includegraphics[width=\textwidth]{different-circumstances}
\caption{Classifier performance on the WANG14 dataset with automatically extracted features.}
\label{fig:different-circumstances}
\end{figure}
Figure \ref{fig:different-circumstances} shows us that the performance drops slightly when extracting features on the \texttt{WANG14} dataset, especially with the stacked autoencoder.
However, the sequence-to-sequence model seems to achieve similar F1 scores on both the \texttt{GRESCHBACH} and \texttt{WANG14} dataset.
\section{Unit Tests}
On top of evaluating the results, we also needed to ensure that the code behaves as we expect it to.
For this we use unit tests.
Some bits of the code, such as the Tensorflow models, are difficult to test, but we can still test all of the preprocessing to check that the correct values are produced.
For this we use Python's standard \texttt{unittest} module \cite{python_unittest_documentation}.
The reason for this choice is that it is flexible and it is the standard Python unit testing framework, which means it is commonly used.
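As a small, purely hypothetical example of the kind of test used, the snippet below checks a placeholder trace-cutting routine; the function and test names are not the project's real API, which lives in the preprocessing and \texttt{feature\_extraction} modules.
\begin{verbatim}
# Hypothetical unit test for a preprocessing routine (names are placeholders).
import unittest

def cut_trace(trace, seconds):
    """Keep only the (timestamp, direction) cells within the first `seconds`."""
    return [(t, d) for (t, d) in trace if t <= seconds]

class TestPreprocessing(unittest.TestCase):
    def test_cut_trace_keeps_only_early_cells(self):
        trace = [(0.1, 1), (5.0, -1), (12.0, 1)]
        self.assertEqual(cut_trace(trace, 10), [(0.1, 1), (5.0, -1)])

if __name__ == "__main__":
    unittest.main()
\end{verbatim}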
On top of unit tests, \textit{Travis} was also used \cite{travis}.
Travis is a popular continuous integration tool that integrates easily with Github.
Therefore, every time a commit is pushed to the remote repository, Travis runs all of the tests automatically.
If one of the tests fails, Travis immediately notifies all the contributors.
Finally, to check whether our tests cover the entire codebase, an online service called \textit{codecov} is used \cite{codecov}.
This tool automatically checks how much of the codebase all of the unit tests cover.
At the time of writing, the coverage is $93\%$.
The bits that are not covered by unit tests, such as the Tensorflow implementation of the deep learning models, have been carefully examined using the Tensorflow debugger \cite{tensorflow} to see if they behave as expected.
\section {PlanesJavaFx}
\subsection{Layout}
The layout of the PlanesJavaFx GUI consists mainly of:
\begin{itemize}
\item LeftPane - implements the left pane
\item RightPane - implements the right pane
\end{itemize}
The left pane is a tab widget with three tabs:
\begin{itemize}
\item a board editing tab, containing the controls to move the planes left, right, downwards and upwards, as well as to rotate them. In this tab there are also a control for toggling the currently selected plane and a button to confirm that the plane positioning is complete.
\item a game tab, which shows the move statistics during the game
\item a start new round tab, showing the global score and a button that allows the player to start a new round
\end{itemize}
The right pane is a tab widget with two tabs: one is the player game board and the other is the computer game board.
\subsection{Displaying the Game Boards}
\subsubsection {PlaneRoundJavaFx - Use of Java Native Interface}
Because the GUI is implemented in Java and the game engine in C++, we used the Java Native Interface (JNI) to access the C++ game engine from the Java side.
\begin{lstlisting} [caption={PlaneRoundJavaFX Interface}]
public class PlaneRoundJavaFx {
static {
System.loadLibrary("libCommon"); // Load native library
}
//creates the PlaneRound object in the game engine
//must be called a single time
public native void createPlanesRound();
//show the planes
public native int getRowNo();
public native int getColNo();
public native int getPlaneNo();
public native int getPlaneSquareType(int i, int j, int isComputer);
//edit the board
public native int movePlaneLeft(int idx);
public native int movePlaneRight(int idx);
public native int movePlaneUpwards(int idx);
public native int movePlaneDownwards(int idx);
public native int rotatePlane(int idx);
public native void doneClicked();
//play the game
public native void playerGuess(int row, int col);
public native boolean playerGuess_RoundEnds();
public native boolean playerGuess_IsPlayerWinner();
public native boolean playerGuess_ComputerMoveGenerated();
public native int playerGuess_StatNoPlayerMoves();
public native int playerGuess_StatNoPlayerHits();
public native int playerGuess_StatNoPlayerMisses();
public native int playerGuess_StatNoPlayerDead();
public native int playerGuess_StatNoPlayerWins();
public native int playerGuess_StatNoComputerMoves();
public native int playerGuess_StatNoComputerHits();
public native int playerGuess_StatNoComputerMisses();
public native int playerGuess_StatNoComputerDead();
public native int playerGuess_StatNoComputerWins();
public native void roundEnds();
public native void initRound();
//show the guesses
public native int getPlayerGuessesNo();
public native int getPlayerGuessRow(int idx);
public native int getPlayerGuessCol(int idx);
public native int getPlayerGuessType(int idx);
public native int getComputerGuessesNo();
public native int getComputerGuessRow(int idx);
public native int getComputerGuessCol(int idx);
public native int getComputerGuessType(int idx);
}
\end{lstlisting}
The class PlaneRoundJavaFx loads the libCommon library and defines a series of methods which are declared with the keyword \texttt{native}, that is, they are implemented in a C/C++ library. The native methods are the only gateway from the Java GUI to the Planes game engine. They do the following (a usage sketch is given after the list):
\begin{itemize}
\item createPlanesRound() - initialize the game engine
\item getRowNo() - gets the size of the game board
\item getColNo() - gets the size of the game board
\item getPlaneNo() - gets the plane number
\item getPlaneSquareType(int i, int j, int isComputer) - for a square on the game board returns what it contains: a plane head, plane, not plane, game board, outside the game board
\item movePlaneLeft() - repositions the selected plane to the left
\item movePlaneRight() - repositions the selected plane to the right
\item movePlaneUpwards() - repositions the selected plane upwards
\item movePlaneDownwards() - repositions the selected plane downwards
\item rotatePlane() - rotates the selected plane by 90 degrees
\item doneClicked() - ends the board editing phase
\item getPlayerGuessesNo() - how many guesses the player has made
\item getPlayerGuessRow() - coordinate of the desired player guess
\item getPlayerGuessCol() - coordinate of the desired player guess
\item getPlayerGuessType() - result of the desired player guess
\item getComputerGuessesNo() - how many guesses the computer has made
\item getComputerGuessRow() - coordinate of the desired computer guess
\item getComputerGuessCol() - coordinate of the desired computer guess
\item getComputerGuessType() - result of the desired computer guess
\item playerGuess(int row, int col) - communicate a guess of the player to the game engine
\item playerGuess\_RoundEnds() - whether the round ends
\item playerGuess\_IsPlayerWinner() - who won
\item playerGuess\_ComputerMoveGenerated() - whether a computer move was generated
\item playerGuess\_StatNoPlayerMoves() - statistics about the player's moves
\item playerGuess\_StatNoPlayerHits() - statistics about the player's moves
\item playerGuess\_StatNoPlayerMisses() - statistics about the player's moves
\item playerGuess\_StatNoPlayerDead() - statistics about the player's moves
\item playerGuess\_StatNoPlayerWins() - number of wins for the player
\item playerGuess\_StatNoComputerMoves() - statistics about the computers's moves
\item playerGuess\_StatNoComputerHits() - statistics about the computers's moves
\item playerGuess\_StatNoComputerMisses() - statistics about the computers's moves
\item playerGuess\_StatNoComputerDead() - statistics about the computers's moves
\item playerGuess\_StatNoComputerWins() - number of wins for the computer
\item roundEnds() - do what is required when the round ends
\item initRound() - do what is required to initialize a new round
\end{itemize}
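The following listing is an illustrative usage sketch, not code taken from the project: it drives the engine only through methods declared in the listing above, and the class name EngineDemo is hypothetical.
\begin{lstlisting} [caption={Illustrative Usage of the Native Interface (hypothetical)}]
public final class EngineDemo {
    public static void main(String[] args) {
        PlaneRoundJavaFx engine = new PlaneRoundJavaFx();
        engine.createPlanesRound();            // initialize the game engine once

        int rows = engine.getRowNo();          // board geometry
        int cols = engine.getColNo();

        // Print the content of every square of the player board (isComputer = 0)
        for (int i = 0; i < rows; i++) {
            for (int j = 0; j < cols; j++) {
                System.out.print(engine.getPlaneSquareType(i, j, 0) + " ");
            }
            System.out.println();
        }

        engine.doneClicked();                  // finish the board editing phase
        engine.playerGuess(0, 0);              // guess the square (0, 0)
        if (engine.playerGuess_RoundEnds()) {
            engine.roundEnds();                // bookkeeping at the end of the round
        }
    }
}
\end{lstlisting}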
Corresponding to this Java class, a C++ implementation of the required functionality was created in the game engine library. The C++ implementation is a wrapper around the PlaneRound game controller, in which almost all functions are single method calls on the PlaneRound object. The header of the implementation is generated automatically with the javac tool. An excerpt is given in the following listing (TODO: check whether this applies only to Windows):
\begin{lstlisting} [caption={C++ Implementation for PlaneRoundJavaFx}]
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class com_planes_javafx_PlaneRoundJavaFx */
#ifndef _Included_com_planes_javafx_PlaneRoundJavaFx
#define _Included_com_planes_javafx_PlaneRoundJavaFx
#ifdef __cplusplus
extern "C" {
#endif
/*
* Class: com_planes_javafx_PlaneRoundJavaFx
* Method: createPlanesRound
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_com_planes_javafx_PlaneRoundJavaFx_createPlanesRound
(JNIEnv *, jobject);
.....
#ifdef __cplusplus
}
#endif
#endif
\end{lstlisting}
In the .cpp file, all the functions work with one or more of the following three global objects:
\begin{lstlisting}[caption={Global Variables}\label{Global_Variable_Definitions}]
PlaneRound* global_Round = nullptr;
GuessPoint::Type global_Guess_Result = GuessPoint::Miss;
PlayerGuessReaction global_Player_Guess_Reaction;
\end{lstlisting}
global\_Round is the game controller, global\_Guess\_Result is the result of the evaluation of the last player guess, and global\_Player\_Guess\_Reaction is the response of the game engine to the last player guess.
global\_Round is created with the function createPlanesRound() from PlaneRoundJavaFx which corresponds to the function\\ Java\_com\_planes\_javafx\_PlaneRoundJavaFx\_createPlanesRound() in the C++ implementation file. global\_Guess\_Result and global\_Player\_Guess\_Reaction are obtained from the following function call:
\begin{lstlisting}
global_Round->playerGuessIncomplete(int(row), int(col), global_Guess_Result, global_Player_Guess_Reaction);
\end{lstlisting}
The function playerGuessIncomplete is defined in the PlaneRound class (\ref{Game_Controller}).
\subsubsection {The BoardPane Class}
The computer's game board and the player's game board are parts of the right pane and are displayed using the functionality offered by the PlaneRoundJavaFx object. The functionality is implemented in the class BoardPane.
The member variables are as follows:
\begin{lstlisting} [caption={BoardPane's Member Variables}]
private Map<PositionBoardPane, Canvas> m_GridSquares;
private PlaneRoundJavaFx m_PlaneRound;
private RightPane m_RightPane;
private int m_Padding = 3;
private boolean m_IsComputer = false;
private int m_MinPlaneBodyColor = 0;
private int m_MaxPlaneBodyColor = 200;
private GameStages m_CurStage = GameStages.BoardEditing;
private int m_SelectedPlane = 0;
private EventHandler<MouseEvent> m_ClickedHandler;
private Text m_AnimatedText;
private GridPane m_GridPane;
private int m_GRows = 10;
private int m_GCols = 10;
private int m_PlaneNo = 3;
private int m_ColorStep = 50;
\end{lstlisting}
The game board is a rectangle of squares with a padding, which allows the rotation of planes when they are close to the boundary of the board. The size of the padding is defined by the member variable m\_Padding. Whether the board belongs to the player or to the computer is defined by the boolean m\_IsComputer. When editing the game board, a plane must be selected in order to move it; the currently selected plane is given by the variable m\_SelectedPlane. Each of the planes on the game board is displayed in a gray tone, which is determined by the maximum and minimum gray tones as well as by the number of planes. The maximum and minimum gray tone levels are defined by the variables m\_MinPlaneBodyColor and m\_MaxPlaneBodyColor. The current stage of the game (game, board editing, game not started yet) is stored in the variable m\_CurStage.
What is displayed in each of the cells of the game board is defined by the map m\_GridSquares; its values are objects of the Canvas class, a graphical object that can be drawn into. The layout of the board is defined by the m\_GridPane variable. What happens when the user clicks on a board cell is defined by the event handler m\_ClickedHandler.
At the end of a round an animated text is displayed in the computer's board. This text is defined with the variable m\_AnimatedText.
A reference to the right pane is saved in the variable m\_RightPane so that methods of that class can be called directly. Finally, the game engine is saved in the variable m\_PlaneRound.
The color of the board cell is computed with the following function:
\begin{lstlisting} [caption={Computing Grid Board Colors}]
public Color computeSquareBackgroundColor(int i, int j) {
Color squareColor = null;
if (i < m_Padding || i >= m_GRows + m_Padding || j < m_Padding || j >= m_GCols + m_Padding) {
squareColor = Color.YELLOW;
} else {
squareColor = Color.AQUA;
}
if (!m_IsComputer || (m_IsComputer && m_CurStage == GameStages.GameNotStarted)) {
int type = m_PlaneRound.getPlaneSquareType(i - m_Padding, j - m_Padding, m_IsComputer ? 1 : 0);
switch (type) {
//intersecting planes
case -1:
squareColor = Color.RED;
break;
//plane head
case -2:
squareColor = Color.GREEN;
break;
//not a plane
case 0:
break;
//plane but not plane head
default:
if ((type - 1) == m_SelectedPlane) {
squareColor = Color.BLUE;
} else {
int grayCol = m_MinPlaneBodyColor + type * m_ColorStep;
squareColor = Color.rgb(grayCol, grayCol, grayCol);
}
break;
}
}
return squareColor;
}
\end{lstlisting}
\subsection{C++ Concepts}
\subsubsection{Global Variables}
The three variables defined in \ref{Global_Variable_Definitions} are global variables, that is, they are defined outside the body of any function. As opposed to local variables, which are defined inside the body of functions, the lifetime of these variables is the entire lifetime of the program. They also have file scope, which means they are accessible from all functions defined in the same file as the global variables.
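As a small illustration (unrelated to the game code), the following sketch contrasts the lifetime of a global variable with that of a local variable:
\begin{lstlisting} [caption={Global versus Local Variables (illustrative)}]
#include <iostream>

int g_CallCount = 0;           // global: exists for the whole program run

void play() {
    int localMoves = 0;        // local: created anew on every call
    ++localMoves;
    ++g_CallCount;
    std::cout << "local " << localMoves
              << ", global " << g_CallCount << std::endl;
}

int main() {
    play();   // prints: local 1, global 1
    play();   // prints: local 1, global 2
    return 0;
}
\end{lstlisting}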
\subsection{JavaFx Concepts}
\subsubsection{Layouts}
The following JavaFX layouts were used: GridPane, a grid-like layout, and TabPane, a container with several tabs.
\subsubsection{Properties and Property Binding}
Properties in JavaFX are data containers that can signal to other components when the associated values change. Properties can be connected to one another through a mechanism called binding. That allows, for example, the game board grid squares to be resized when the game window is resized, as in the sketch below.
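The following is a minimal illustrative sketch of properties and binding; the variable names are hypothetical and the snippet is independent of the game code:
\begin{lstlisting} [caption={Property Binding (illustrative)}]
import javafx.beans.property.DoubleProperty;
import javafx.beans.property.SimpleDoubleProperty;

public final class BindingDemo {
    public static void main(String[] args) {
        DoubleProperty windowWidth = new SimpleDoubleProperty(500);
        DoubleProperty squareSize = new SimpleDoubleProperty();

        // squareSize is kept equal to windowWidth / 10 automatically
        squareSize.bind(windowWidth.divide(10));

        windowWidth.set(800);                  // simulate a window resize
        System.out.println(squareSize.get());  // prints 80.0
    }
}
\end{lstlisting}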
\subsection {Java Concepts}
\subsubsection{Access Specifiers}
Access specifiers in Java are largely the same as in C++ (TODO: reference), except that they are placed in front of each member variable or member function.
\subsubsection {Event Handlers}
Event handlers in Java are functions that are automatically called when an event occurs; a minimal sketch is given below.
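The following illustrative sketch (not taken from the game code) registers an event handler for mouse clicks on a canvas:
\begin{lstlisting} [caption={Event Handler (illustrative)}]
import javafx.event.EventHandler;
import javafx.scene.canvas.Canvas;
import javafx.scene.input.MouseEvent;

public final class ClickDemo {
    public static Canvas makeClickableCanvas() {
        Canvas canvas = new Canvas(100, 100);
        EventHandler<MouseEvent> clickedHandler = event ->
            System.out.println("clicked at " + event.getX() + ", " + event.getY());
        canvas.setOnMouseClicked(clickedHandler);  // called on every mouse click
        return canvas;
    }
}
\end{lstlisting}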
\chapter{Idealized rates and instruments}
\label{chap:instuments}
\added[comment={Added}]{In this chapter we review idealized versions of interest rates and derivatives of interest rates. By idealized, we mean that the instruments are simplified for analytical purposes. For example, there is no lag between trade and spot date or between expiry and delivery date. Nor do we use a funding rate that is separate from the market rates. The treatment is standard and is based mainly on} \textcite[pp. 1--22]{brigo2007interest} unless otherwise noted.
In the following, we assume that $0 < t < T$ are points of time and $\dayc(t,T) \in [ 0, \infty )$ is the day count convention between the points $t$ and $T$. We explicitly assume that $\dayc(t,T) \approx T-t$ when $t \approx T$.
\section{Fundamental rates and instruments}
\subsection{Short-rate, idealized bank account and stochastic discount factor}
When making calculations with the idealized bank account, it is customary to assume that the day count convention is $\dayc(t,T) = T-t$, as this simplifies the notation. An idealized bank account is an instrument with the value
\begin{align}
\Bank(t) = \exp \left( \int\limits_0^t r(s) \dx s \right)
\end{align}
where $r(t)$ is the short-rate. The short-rate $r(t)$ may be non-deterministic, but we assume that it is smooth enough so that the integral can be defined in some useful sense. We note that $\Bank(0)=1$. If $\delta > 0$ is very small and $r(t)$ is a smooth function, then
\begin{align}
\int\limits_t^{t+\delta} r(s) \dx s \approx r(t) \delta
\end{align}
and we see that the first-order expansion of exponential function yields
\begin{align}
\Bank(t+\delta) \approx \Bank(t) ( 1+r(t) \delta ) .
\end{align}
Thus the short-rate can be seen as a continuous interest rate intensity. The short-rate is a purely theoretical construction which can be used to price financial instruments.
Now we may define a stochastic discount factor $\DF(t,T)$ from time $t$ to $T$ as
\begin{align}
\DF(t,T) = \frac{\Bank(t)}{\Bank(T)} = \exp \left( - \int\limits_t^T r(s) \dx s \right) .
\end{align}
If $r(t)$ is a random variable, then $\Bank(t)$ and $\DF(t,T)$ are stochastic too.
\subsection{Zero-coupon bond}
A promise to pay one unit of currency at time $T$ is called a $T$-bond. We shall assume that there is no credit risk for these bonds. We further assume that the market is liquid and that bonds may be freely bought and sold at the same price; furthermore, short selling is allowed without limits or extra fees. The price of this bond at time $t$ is denoted by $\Bond(t,T)$, and so $\Bond(t,T) > 0$ and $\Bond(T,T) = 1$.
As $\DF(0,t)$ is guaranteed to pay one unit of currency at the time $t$, we see that in this case $\DF(0,t) = \Bond(0,t)$. We note that if the short-rate $r(t)$ is deterministic, then
\begin{align}
\DF(0,t) = \frac{1}{\Bank(t)}
\end{align}
is deterministic too. We see that if short-rate $r(t)$ is deterministic, then $\DF(t,T) = \Bond(t,T)$ for all $0 \leq t \leq T$. But this does not hold if $r(t)$ is truly stochastic.
\subsection{Simple spot $\Rflt(t,T)$ and $k$-times compounded simple spot rate}
The simple spot rate $\Rflt(t,T)$ is defined by
\begin{align}
\Rflt(t,T) = \frac{1 - \Bond(t,T)}{\dayc(t,T)\Bond(t,T)} ,
\end{align}
which is equivalent to
\begin{align}
\label{discountandrate}
1 + \dayc(t,T) \Rflt(t,T) = \frac{1}{\Bond(t,T)} .
\end{align}
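As a purely illustrative numerical example (the numbers are made up), if $\Bond(t,T) = 0.98$ and $\dayc(t,T) = 0.5$, then
\begin{align}
\Rflt(t,T) = \frac{1 - 0.98}{0.5 \cdot 0.98} \approx 0.0408 ,
\end{align}
that is, a simple spot rate of roughly $4.08\,\%$.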
For $k \geq 1$, the $k$-times compounded interest rate from $t$ to $T$ is
\begin{align}
\Rflt^k(t,T) = \frac{k}{\Bond(t,T)^{\frac{1}{k \dayc(t,T)}}} - k,
\end{align}
which is equivalent to
\begin{align}
\Bond(t,T) \left( 1 + \frac{\Rflt^k(t,T)}{k} \right)^{k \dayc(t,T)} = 1 .
\end{align}
As
\begin{align}
(1 + \frac{x}{k})^k \longrightarrow \e^x
\end{align}
when $k \longrightarrow \infty$, then
\begin{align}
\left( 1 + \frac{\Rflt^k(t,T)}{k} \right)^{k\dayc(t,T)} \longrightarrow \e^{\dayc(t,T)r(t,T)} ,
\end{align}
where $r(t,T)$ denotes the limit of $\Rflt^k(t,T)$ as $k \longrightarrow \infty$.
\subsection{Forward rate agreement}
A forward rate agreement (FRA) is a contract that pays
\begin{align}
\dayc_K(t,T) K - \dayc(t,T) \Rflt(t,T)
\end{align}
at the time $T$. Here we assume that the contract is made at the present time $0$ and $0 < t < T$, but this assumption is made just to keep the notation simpler. Here $K$ is an interest rate that is fixed at time $0$, $\dayc_K$ is the day count convention for this fixed rate and $\Rflt(t,T)$ is the spot rate from time $t$ to $T$ (which might not be known at present). The price of a FRA at the time $s \leq t$ is denoted by $\FRA(s,t,T,K)$. Now
\begin{align}
\FRA(t,t,T,K) = \Bond(t,T) \left( \dayc_K(t,T) K - \dayc(t,T) \Rflt(t,T) \right) .
\end{align}
In order to price a FRA at different times, we consider a portfolio of one long $T$-bond and $x$ short $t$-bonds. The value of this portfolio at the present is $V(0) = \Bond(0,T) - x \Bond(0,t)$ and we note that the portfolio has zero value if
\begin{align}
\label{FRAzeroprice}
x = \frac{\Bond(0,T)}{\Bond(0,t)}.
\end{align}
At the time $t$, the portfolio has value
\begin{align}
V(t) &= \Bond(t,T) - x \\
&= \Bond(t,T) \left( 1 - \frac{x}{\Bond(t,T)} \right)
\end{align}
where $\Bond(t,T)$ is known and
\begin{align}
1 + \dayc(t,T) \Rflt(t,T) = \frac{1}{\Bond(t,T)} = y(t,T).
\end{align}
We define $K^*(x) = x^{-1}$. Thus
\begin{align}
1 - \frac{x}{\Bond(t,T)} &= x \left( \frac{1}{x} - \frac{1}{\Bond(t,T)} \right) \\
&= x \left( K^*(x) - y(t,T) \right)
\end{align}
and this implies that
\begin{align}
\label{forwardpricelemmaquation}
V(t) &= x \Bond(t,T) \left( K^*(x) - y(t,T) \right) .
\end{align}
Without arbitrage
\begin{align}
\label{forwardpricelemmaquation1}
V(T) &= x \left( K^*(x) - y(t,T) \right) \\
&= x \left( K^*(x) - 1 - \dayc(t,T) \Rflt(t,T) \right)
\end{align}
We note that at the time $0$, $K^*(x)$ is a known yield but $y(t,T)$ is an unknown yield if $\Bond(t,T)$ is not deterministic. Now if
\begin{align}
K &= \frac{1}{\dayc_K(t,T)} \left( K^*(x) - 1 \right) \\
&= \frac{1}{\dayc_K(t,T)} \left( \frac{1}{x} - 1 \right)
\end{align}
the given portfolio can be used to replicate the cash flows of the FRA and
\begin{align}
x \FRA(s,t,T,K) = V(s) .
\end{align}
If
\begin{align}
x &= \frac{\Bond(0,T)}{\Bond(0,t)}
\end{align}
then $V(0) = 0$ and
\begin{align}
K &= \frac{1}{\dayc_K(t,T)} \left( \frac{\Bond(0,t)}{\Bond(0,T)} - 1 \right) \\
&= \frac{\dayc(t,T)}{\dayc_K(t,T)} \Rflt(0,t,T) .
\end{align}
We see that the forward rate and the rate that gives a FRA zero present value are essentially the same. Thus we define the forward rate at the time $t$ from time $T$ to $S$ as
\begin{align}
\Rflt(t,T,S) &= \frac{1}{\dayc(T,S)} \left( \frac{\Bond(t,T)}{\Bond(t,S)} - 1 \right) .
\end{align}
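As a purely illustrative numerical example (the numbers are made up), if $\Bond(t,T) = 0.99$, $\Bond(t,S) = 0.97$ and $\dayc(T,S) = 0.5$, then
\begin{align}
\Rflt(t,T,S) = \frac{1}{0.5} \left( \frac{0.99}{0.97} - 1 \right) \approx 0.0412 .
\end{align}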
Since $\dayc(T,S) \approx S-T$ when $T \approx S$, we have that
\begin{align}
\Rflt(t,T,S) &= \frac{1}{\dayc(T,S)} \left( \frac{\Bond(t,T)}{\Bond(t,S)} - 1 \right) \\
&\approx - \frac{1}{\Bond(t,S)} \frac{\Bond(t,S) - \Bond(t,T)}{S-T}
\end{align}
and therefore
\begin{align}
\Rflt(t,T,S) & \longrightarrow - \frac{1}{\Bond(t,T)} \frac{ \partial \Bond(t,T) }{ \partial T} \\
&= - \frac{\partial \log \Bond(t,T)}{\partial T}
\end{align}
when $S \rightarrow T^+$, under the assumption that the zero curve $\Bond(t,T)$ is differentiable in $T$. We now define the instantaneous forward rate at the time $t$ for maturity $T$ as
\begin{align}
\Forwardrate(t,T) = - \frac{\partial \log \Bond(t,T)}{\partial T}.
\end{align}
Now since $\Bond(t,t) = 1$,
\begin{align}
- \int_t^T \Forwardrate(t,s) \dx s &= \int_t^T \frac{\partial \log \Bond(t,s)}{\partial s} \dx s \\
&= \log \Bond(t,T) - \log \Bond(t,t) \\ &= \log \Bond(t,T)
\end{align}
meaning that
\begin{align}
\Bond(t,T) = \exp \left( - \int\limits_t^T \Forwardrate (t,s) \dx s \right) .
\end{align}
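In particular, as a purely illustrative special case, if the instantaneous forward curve is flat, so that $\Forwardrate(t,s) = f$ for all $t \leq s \leq T$, the formula reduces to
\begin{align}
\Bond(t,T) = \e^{-f (T-t)} .
\end{align}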
\section{Interest rate instruments}
\subsection{Fixed leg and floating leg}
A leg with tenor $t_0 < t_1 < t_2 < \ldots < t_n = T$ and coupons $c_1, c_2, \ldots, c_n$ is an instrument that pays $c_i$ at the time $t_i$ for all $1 \leq i \leq n$. The coupons may be functions of some variables. Thus a leg is a portfolio of $n$ zero-coupon bonds with maturities coinciding with the tenor dates. It has a present value of
\begin{align}
\sum_{i=1}^n c_i \Bond (t, t_i) \1_{ \{ t \leq t_i \} }
\end{align}
at the time $t$.
A floating leg with a unit principal has coupons defined by $c_i = \dayc_1(t_{i-1}, t_i) \Rflt(t_{i-1}, t_i)$, where $\Rflt$ is the floating reference rate. It has a present value of
\begin{align}
PV_{\text{float}}(t) &= \sum_{i=1}^n \Bond (t,t_i) \dayc_1(t_{i-1}, t_i) \Rflt(t_{i-1}, t_i) \\
&= \sum_{i=1}^n \Bond (t,t_i) \left( \frac{1}{\Bond(t_{i-1},t_i)} -1 \right) \\
&= \sum_{i=1}^n \Bond (t,t_{i-1}) \Bond (t_{i-1},t_i) \left( \frac{1}{\Bond(t_{i-1},t_i)} -1 \right) \\
&= \sum_{i=1}^n \left( \Bond (t,t_{i-1}) - \Bond (t,t_{i-1})\Bond (t_{i-1},t_i) \right) \\
&= \sum_{i=1}^n \left( \Bond (t,t_{i-1}) - \Bond (t,t_i) \right) \\
&= \Bond (t,t_{0}) - \Bond (t,t_n)
\end{align}
and especially $PV_{\text{float}}(t_0) = 1 - \Bond (t_0,t_n)$.
If the coupons are $c_i = K \dayc_0(t_{i-1}, t_i)$ for a fixed rate $K$, then we call it a fixed leg with a unit principal. It has a present value of
\begin{align}
PV_{\text{fixed}}(t) &= K \sum_{i=1}^n \dayc_0(t_{i-1}, t_i) \Bond (t,t_i) .
\end{align}
\subsection{Coupon bearing bond}
A coupon bearing bond with floating coupons and a unit principal is a combination of a floating leg and a payment of one currency unit coinciding with the last tenor date. Thus it has a present value of
\begin{align}
PV_{\text{floating bond}}(t) = \Bond (t,t_{0})
\end{align}
and especially $PV_{\text{floating bond}}(t_0) = 1$.
Similarly, a coupon bearing bond with fixed coupons and a unit principal is a combination of a fixed leg and a payment of one currency unit coinciding with the last tenor date. It has a present value of
\begin{align}
PV_{\text{fixed bond}}(t) &= \Bond (t,t_n) + K \sum_{i=1}^n \dayc_0(t_{i-1}, t_i) \Bond (t,t_i) \\
&= \Bond (t,t_n) + PV_{\text{fixed}}(t) .
\end{align}
\subsection{Vanilla interest rate swap}
A vanilla payer interest rate swap (IRS) is a contract defined by paying a fixed leg and receiving a floating leg. A vanilla receiver IRS is a contract defined by paying a floating leg and receiving a fixed leg. The legs may have different numbers of coupons, and the coupon dates and day count conventions may not coincide. If $K$ is the common rate for the fixed leg and both legs have the same notional value, then a payer IRS has the present value of
\begin{align}
\sum_{i=1}^m \Bond (t,t'_i) \dayc_1(t'_{i-1}, t'_i) \Rflt(t'_{i-1}, t'_i) - K \sum_{i=1}^n \Bond (t,t_i) \dayc_0(t_{i-1}, t_i)
\end{align}
where $t'_0 < t'_1 < t'_2 < \ldots < t'_m$ are the coupon times for the floating leg. A par swap is a swap with present value of zero and the fixed rate for a par swap is
\begin{align}
K = \frac{ \sum\limits_{i=1}^m \Bond (t,t'_i) \dayc_1(t'_{i-1}, t'_i) \Rflt(t'_{i-1}, t'_i) }{ \sum\limits_{i=1}^n \Bond (t,t_i) \dayc_0(t_{i-1}, t_i) }
\end{align}
It is easy to see that if both legs have the same underlying notional principal and the coupon dates are the same, then the swap is just a collection of forward rate agreements with a fixed strike price. A vanilla payer IRS lets the payer hedge interest rate risk by converting a liability with floating rate payments into fixed payments.
\subsection{Overnight indexed swap}
At the end of a banking day, banks and other financial institutions may face a surplus or a shortage of funds. They may lend the excess or borrow the shortfall on the overnight market. The overnight lending rate is often regarded as a proxy for the risk-free rate. In the euro area, the European Central Bank calculates Eonia, which is a weighted average of all overnight unsecured lending transactions in the interbank market.
An overnight indexed swap (OIS) is a swap where a compounded reference overnight lending rate is exchanged for a fixed rate.
\subsection{Call and put option and call-put parity}
A European call (put) option gives the buyer the right, but not the obligation, to buy (sell) a designated underlying instrument from the option seller at a fixed price on the expiry date. Thus a call option on a $T$-bond with strike price $K$ and maturity $S < T$ has the final value
\begin{align}
\ZBC(S,S,T,K) = \left( \Bond(S,T) - K \right)^+
\end{align}
and the corresponding put option has the final value
\begin{align}
\ZBP(S,S,T,K) = \left( K - \Bond(S,T) \right)^+ .
\end{align}
A portfolio of long one call and short one put option on a same $T$-bond with identical strike price $K$ and maturity $S$ has final value of
\begin{align}
\left( \Bond(S,T) - K \right)^+ - \left( K - \Bond(S,T) \right)^+ = \Bond(S,T) - K.
\end{align}
Therefore, without any arbitrage, the so-called call-put parity
\begin{align}
\ZBC(t,S,T,K) - \ZBP(t,S,T,K) = \Bond(t,T) - \Bond(t,S) K
\end{align}
holds for all $t \leq S$.
\subsection{Caplet, cap, floorlet and floor}
In order to keep the notation simpler, we assume that the present time is $0$ and $0 < t < T$. A caplet is an interest rate derivative in which the buyer receives
\begin{align}
\left( \Rflt(t,T) - K \right)^+
\end{align}
at the time $T$, where $\Rflt(t,T)$ is some reference rate and $K$ is the fixed strike price. The strike is fixed when the contract is made.
Suppose that a firm must pay a floating rate $L$. By buying a cap with strike $K$ against $L$, the firm is paying
\begin{align}
L - \left( L - K \right)^+ = \min (L,K)
\end{align}
meaning that the highest rate it will pay is the strike rate $K$. Thus caps may be used to hedge interest rate risk.
Now
\begin{align}
\Rflt(t,T) - K &= \frac{1}{\dayc(t,T)} \left( 1+\dayc(t,T)\Rflt(t,T) - K^* \right) \\
&= \frac{1}{\dayc(t,T)} \left( \frac{1}{\Bond(t,T)} - K^* \right)
\end{align}
where $K^* = 1+ \dayc(t,T)K$. Thus the value of a caplet at the time $t$ is
\begin{align}
\Bond(t,T) \left( \Rflt(t,T) - K \right)^+ &= \frac{\Bond(t,T)}{\dayc(t,T)} \left( \frac{1}{\Bond(t,T)} - K^* \right)^+ \\
&= \frac{1}{\dayc(t,T)} \left( 1 - \Bond(t,T) K^* \right)^+ \\
&= \frac{K^*}{\dayc(t,T)} \left( \frac{1}{K^*} - \Bond(t,T) \right)^+ .
\end{align}
But this is the price of $\frac{K^*}{\dayc(t,T)}$ put options on a $T$-bond with strike price $\frac{1}{K^*}$ at the time of strike $t$. Thus we can price a caplet as a put option on a bond. As the price of a cap contains optionality, we must model the interest rates in order to price it.
A cap is a linear collection of caplets with the same strike price.
A floorlet is a derivative with the payment
\begin{align}
\left( K - \Rflt(t,T) \right)^+
\end{align}
at the time $T$, where $\Rflt(t,T)$ is some reference rate with day count convention $\dayc(t,T)$ and $K$ is the fixed strike price. Similarly, a floor is a linear collection of floorlets with the same strike price. A floorlet can be priced as $\frac{K^*}{\dayc(t,T)}$ call options on a $T$-bond with strike price $\frac{1}{K^*}$ at the time of strike $t$.
\subsection{Swaption}
A swaption is an interest rate derivative that allows the owner the right but not an obligation to enter into an IRS. A payer swaption gives the owner the right to enter a payer swap (a swap paying a fixed rate while receiving floating rate). A receiver swaption gives the owner the option to initiate a receiver swaption (a swap paying a floating rate while receiving a fixed rate).
A European payer swaption is equivalent to a European put option on a coupon bearing bond. The underlying swap has the value of
\begin{align}
\Swap(S) = PV_{\text{float}}(S) - PV_{\text{fixed}}(S) .
\end{align}
at the time of the strike $S$. Thus
\begin{align}
\Swaption(S) &= \left( PV_{\text{float}}(S) - PV_{\text{fixed}}(S) \right)^+ \\
&= \left( 1 - \Bond (S,t_n) - PV_{\text{fixed}}(S) \right)^+ \\
&= \left( 1 - PV_{\text{fixed bond}}(S) \right)^+ .
\end{align}
We see that a payer swaption is a European put option on a fixed rate coupon bond. The coupon rate is the fixed rate of the underlying swap, and the strike price is the principal of the bond and of the underlying swap.
In some cases we may price a swaption as a portfolio of options on zero-coupon bonds. This trick was introduced in \cite{jamshidian1989anexactbondoptionformula}. We now denote the price of a zero-coupon bond as a function of the short rate by $\Bond(t,T,r)$. We consider a put option with maturity $S$ and strike price $K$ on a bond with coupons $c_i$ occurring at times $t_i$, $i=1,2, \ldots, n$. Let $r^*$ be the rate with the property
\begin{align}
K &= \sum_{i=1}^n c_i \Bond(S, t_i, r^*) .
\end{align}
Now the put option has a value
\begin{align}
\left( K - \sum_{i=1}^n c_i \Bond(S,t_i) \right)^+ &= \left( \sum_{i=1}^n c_i \left( \Bond(S, t_i, r^*) - \Bond(S,t_i,r(S)) \right) \right)^+ .
\end{align}
If we assume that the bond prices are uniformly decreasing functions of the short rate, then the option will be exercised if and only if $r^* < r(S)$, and in that case
\begin{align}
\Bond(S, t_i, r^*) > \Bond(S,t_i,r(S)) .
\end{align}
for all $i$. Otherwise, $\Bond(S, t_i, r^*) \leq \Bond(S,t_i,r(S))$ for all $i$. Thus the put option has the value
\begin{align}
\sum_{i=1}^n c_i \left( \Bond(S, t_i, r^*) - \Bond(S,t_i,r(S)) \right)^+
\end{align}
which is a portfolio of put options with maturity $S$ on zero-coupon bonds with strike prices $\Bond(S, t_i, r^*)$. The trick assumes, in essence, that the prices of the zero-coupon bonds move in unison. This is satisfied by one-factor models, but the assumption does not hold for multi-factor models.
Similarly, a European receiver swaption is equivalent to a European call option on a coupon bearing bond. Under the same assumption, we may decompose a receiver swaption into a portfolio of call options on zero-coupon bonds.
\iffalse
\subsection{Eurodollar futures}
Eurodollar futures is a contract that will swap
\begin{align}
1- \Rflt(T,S)
\end{align}
with
\begin{align}
1 - \FUT(t,T,S)
\end{align}
at the time $T$, where $t < T < S$ and the futures rate $\FUT(t,T,S)$ is set so that entering futures contract at the time $t$ costs nothing. Futures contracts are resettled continuously, meaning that for a small time horizon $\delta > 0$, the owner of a futures contract will have a cash flow of
\begin{align}
\FUT(s,T,S) - \FUT(s+\delta,T,S) .
\end{align}
In practice, resettlement is done daily. As $\FUT(T,T,S) = \Rflt (T,S)$, the holder of a futures contract has experienced undiscounted net cash flow of
\begin{align}
\sum\limits_{t \leq s < T} \left( \FUT(s,T,S) - \FUT(s+\delta,T,S) \right) = \FUT(t,T,S) - \Rflt (T,S) ,
\end{align}
but we note this number contains undiscounted cash flows from different point of times. If interest rates raises, the futures contract will lose value. On the other hand, falling interest rates will make contract more valuable. Assuming that the rates of different tenor move in unison, the refinancing cost of settling a move of $n>0$ basis points is higher than the benefits of reinvesting gains from a movement of $-n$ basis points.
\fi
\section{Defaultable instruments and credit default swaps}
\subsection{Defaultable $T$-bond}
A defaultable $T$-bond with no recovery (NR) is an instrument that pays
\begin{align}
\DBond(T,T) = \begin{cases} 1, & T < \default \\ 0, & T \geq \default \end{cases}
\end{align}
at the time $T$, where $\default$ is the time of a default of the underlying. The price of a defaultable $T$-bond at the time $t < T$ is denoted by $\DBond(t,T)$.
A defaultable $T$-bond with recovery of treasury (RT) has the same final payout as a defaultable $T$-bond with no recovery, but in addition it pays $\delta \Bond(\default, T)$ at the default time if $\default \leq T$, where $0 < \delta < 1$. Thus it has a terminal value of
\begin{align}
\DBond(T,T) = \1_{ \{ \default > T \} } + \delta \1_{ \{ \default \leq T \} } .
\end{align}
A defaultable $T$-bond with recovery of face value (RFV) has the same final payout as a defaultable $T$-bond with no recovery, but in addition it pays $\delta$ at the default time if $\default \leq T$, where $0 < \delta < 1$. Thus it has a terminal value of
\begin{align}
\DBond(T,T) = \1_{ \{ \default > T \} } + \frac{\delta}{\Bond(\default,T)} \1_{ \{ \default \leq T \} } .
\end{align}
A defaultable $T$-bond with recovery of market value (RMV) has the same final payout as a defaultable $T$-bond with no recovery, but in addition it pays $\delta\DBond(\default,T)$ at the default time if $\default \leq T$, where $0 < \delta < 1$. Thus it has a terminal value of
\begin{align}
\DBond(T,T) = \1_{ \{ \default > T \} } + \delta \1_{ \{ \default \leq T \} } \Bond(\default,T) .
\end{align}
\subsection{Credit default swap}
A credit default swap (CDS) is an instrument where the seller of the contract will compensate the buyer if the reference instrument or entity has a credit event such as a default. In exchange, the buyer will make periodic payments to the seller until the end of the contract or the default event. The buyer of CDS will be hedged against the credit risk of the reference entity. Originally physical settlement was used. If the credit event occurs before the maturity of the CDS, then the seller is obligated to buy the underlying reference debt for face value. Since the notional value of credit default swaps may be greater than the underlying debt, physical settlement is a cumbersome process and cash settlements are held instead. In order to determine the value of a contract after the default, a credit event auction is held to determine the recovery value $\Rec$ (\cite{ISDAbigbang}, \cite{BISquarterlyreview2010}).
Suppose that the CDS will offer protection from $S$ to $T$ and $\default$ is the time of the credit event. The protection seller has agreed to pay the buyer $\LGD = 1 - \Rec$ at the time $\default$ if $S \leq \default \leq T$. The protection leg of CDS has a value of
\begin{align}
\Protection(t) = \Bond(t, \default) \LGD \1_{ \{ S \leq \default \leq T \} }
\end{align}
at the time $t$. Let $S = t_0 < t_1 < t_2 < \ldots <t_n = T$. The premium leg will pay a coupon rate $C$ at the times $t_1 < t_2 < \ldots < t_n$ if the credit event has not occurred. If the credit event happens, then the buyer will pay the accrued premium at the time of the default. The premium leg has a value of
\begin{align}
\Premium(t, C) =\sum_{i=1}^n \Bond(t, t_i) \dayc(t_{i-1},t_i) C \1_{ \{ \default > t_i \} } + \Bond(t, \default) \dayc(t_s,\default) C \1_{ \{ t_s \leq \default \leq t_{s+1} \} }
\end{align}
where $t_s$ is the last date from $t_0 < t_1 < \ldots < t_n$ before the credit event (if it occurs).
Standardized CDS contracts have quarterly coupon payments, and the rates are usually set to be either $25$, $100$, $500$ or $1000$ basis points. So when the contract is traded, the buyer pays an upfront amount of
\begin{align}
\Protection(0) - \Premium(0, C) ,
\end{align}
which may be negative, in which case the seller pays the buyer.
Earlier, the coupon rate $C$ was set so that $\Premium(0, C) = \Protection(0)$ and no money was exchanged at the trade.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% GKS User Guide -- LaTeX Source %
% %
% Chapter 1 %
% %
% The following external EPS files are referenced: %
% hbbatch.eps, hbookc11.eps %
% %
% Editor: Michel Goossens / CN-AS %
% Last Mod.: 14 July 1992 12:30 mg %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{\protect\label{sec:intro}Introduction}
As a matter of policy, a recommendation was made by HEPCCC to base
future HEP computer graphics applications on the ISO standard
Graphical Kernel System,
GKS \cite{bib-gks2d} and GKS-3D \cite{bib-gks3d}.
GKS had been in use already at CERN for 2D applications, but the decision
to use GKS also for 3D work led to a new tender operation
being carried out
in 1986. This resulted in the firm GTS-GRAL, of Darmstadt,
being selected to provide new implementations of both GKS and GKS-3D
(called {\it GKSGRAL} and {\it GKSGRAL-3D}).
These have been installed on all the major CERN
operating systems (VM/CMS, UNICOS, VMS, AEGIS and UNIX) and
the contract with GTS-GRAL includes
provision for institutes affiliated to CERN to obtain a licence for the
use of the software at very favourable rates.
GKSGRAL drivers are available for a large range of graphics terminals.
Users should be aware of the implications in testing and maintenance
work which has to be carried out on the very large number of
operating-system/driver combinations.
\section{What is GKS (the Graphical Kernel System)}
The Graphical Kernel System (GKS) \cite{bib-gks2d} is a document produced
by the International Standards Organization (ISO) which defines a common
interface to interactive computer graphics for application programs.
GKS has been designed by a group of experts representing the national
standards institutions of most major industrialized countries.
The full standard provides functional specifications for some
200 subroutines which perform graphics input and output
in a device independent way.
Application programs can thus move freely between different graphics
devices and different host computers.
For the first time graphics programs have become genuinely portable.
However, one should point out that GKS itself is not portable.
Individual GKS implementations will vary substantially as they have to
support different graphics devices on different computers.
Moreover, GKS is a {\it kernel} system, and thus does not include
an arbitrary collection of functions to produce histograms or
contour plots, etc. Such facilities are regarded as applications
which sit on top of the basic graphics package and, at CERN,
they are provided by the Graphical Extensions to the NAG Library
\cite{bib-nagref}, or the HPLOT package \cite{bib-HPLOT}.
In order to allow particular applications to choose a graphics package
with the appropriate capability, GKS has been defined to have
different {\it levels}. The level structure has two dimensions,
one for output (0, 1, or 2) and one for input (a, b, or c).
Higher levels include the capabilities of lower levels.
In the United States, ANSI has also defined a level 'm', for very simple
applications, which sits below output level '0'.
Most implementations provide all output (level '2') and intermediate
input (level 'b'). The reason input level 'c' is not usually supported
is that it requires asynchronous input facilities not found in all
operating systems.
\index{FORTRAN binding}
The GKS functions have been defined independently from a specific
programming language, and {\it bindings} to individual languages are
subject to separate standards efforts which have been undertaken
for all the major languages.
The FORTRAN binding is defined by \cite{bib-gksftn}.
The Graphical Kernel System for two dimensional graphics was
adopted as an ISO standard in 1985, and since that date work has been
in progress to define a three dimensional super-set \cite{bib-gks3d}
which was accepted as an International Standard during 1988.
The FORTRAN binding to GKS-3D has also been published as a
Draft International Standard \cite{bib-gksftn3}.
The GKS functions are separated into those which pass values to GKS for
control, setting or output, and those which inquire about
status information. There are 8 distinct classes:
\begin{center}
\begin{tabular}{ll}
1. Control functions&5. Transformations\\
2. Output Attributes&6. Input functions\\
3. Output Primitives&7. Metafile functions\\
4. Segment functions&8. Inquiry functions\\
\end{tabular}
\end{center}
\begin{note}
{\bf Throughout this document many values are specified using}
{\bf GKS Enumerated Types}. The symbolic names for these all
begin with 'G' and are enclosed in single quotes within the text.
(But do {\bf not} include the quotes in FORTRAN calls!)
They have been coded into an {\it Include File},
called ENUM.INC, which is listed in Appendix on Page~\pageref{sec:hdenum}.
\end{note}
\section{Major Differences to Earlier Graphics Interfaces}
In some respects GKS is a major departure from earlier software
interfaces to graphics display systems, and several new concepts have
been introduced. The following is a brief list of the new features,
which will be more fully described within the body of the document.
\begin{OL}
\index{current point}
\item GKS has no concept of a {\it current point}. This idea dates
from when most graphics devices were pen plotters, in which case the
current point corresponded clearly to the position of the pen.
However, it would be less obvious where one would define the current
point when drawing a filled polygon on a raster display.
Thus, all GKS primitives which require
a starting point specify that point in the function call.
\item In most cases earlier software interfaces could only control a single
device at a time, and if some means was provided to 'capture' the graphics
output for storage, then this was handled in a special way.
GKS simplifies this situation considerably by defining
{\it Workstations}, which correspond to 'virtual devices'.
GKS maps each workstation onto a real piece of hardware, but to the
application programmer they are all handled in the same way, and
several workstations can be active simultaneously.
There is a workstation which writes the output it receives onto a
{\it Metafile} for storage and/or transmission over a network
and this, also, is handled just like any other workstation.
\item GKS incorporates the concept of a {\it Logical Device} to
control input. There are six classes of Logical Input Devices, and these
are mapped onto the actual physical devices in use, without the application
programmer having to write special code if, for example, (s)he is using
a mouse instead of a tablet. This helps to make application code much
more portable. The following logical devices are of particular interest:
\begin{OL}
\item The Logical {\it Locator} Device, which returns the locator position
{\it in the user's coordinate system}, not that of the physical device.
\item The Logical {\it Pick} Device, which tells the application
at which object the user is pointing, without having to provide the code to
work it out.
\item The Logical {\it Choice} Device, which allows the user to
choose one option out of a set.
\end{OL}
\item GKS provides a mechanism to group the graphics output commands into
{\it Segments}, where they may be stored for later use.
Thus, an image stored in a segment could be made invisible and then visible
again without the application having to re-draw it.
Information sent to a workstation to produce an image which is not
stored is called {\it non-retained data}.
\item Finally, GKS provides a whole host of functions allowing the application
to {\it inquire} at run-time the state and/or capabilities of a
workstation or the implementation of GKS itself.
This is a very important feature for allowing code to be written in a
portable way, and still to make the best possible use of the environment in
which it is running.
\end{OL}
\section{\protect\label{sec:metint}Computer Graphics Metafiles (GKSM and CGM)}
A graphics metafile is a computer file containing a set of data records
describing a graphical image. It may be used for
\begin{OL}
\item Transporting graphical information between different types of computers.
\item Transporting graphical information from one site to another.
(by magnetic tape for example)
\item Transporting graphical information from one application to another.
\item Device spooling, e.g. for a plotter.
\end{OL}
\index{Appendix E metafile}
As yet there is no official ISO standard for writing a GKS Metafile (GKSM).
However, the ISO GKS Functional Description assumes the existence of one and,
in Appendix E of the document, a metafile format is described
and its use is recommended.
A GKS metafile created using this format is known as an Appendix E metafile.
Unfortunately, not all implementations follow the Appendix E format,
and so GKSM metafiles created by different GKS packages may be incompatible.
In fact, even different examples of Appendix E metafiles may be
incompatible due to variations in the file record structures, etc.
\index{Appendix E metafile!Computer Graphics Metafile}
\index{Appendix E metafile!CGM}
The Computer Graphics Metafile (CGM) has been produced by a separate
standards group within ISO and, when implementations become available,
it will be an alternative to the Appendix E metafile for the storage
and transmission of complete pictures.
One should note that because CGM is an independent standard
compatible with GKS, it is expected to become adopted as the
picture interchange format used between all graphics standards.
\section{The Computer Graphics Virtual Device Interface (CGI)}
\index{logical workstations}
One of the new features introduced by GKS was the concept of a
{\it Logical Workstation}.
This provides an interface layer within the graphics package below
which are situated a set of workstation drivers.
The workstations simulate in software any features which are not provided
by a particular display, and so simplify the problem of driving
new devices. To the applications programmer all workstations
have (more-or-less) similar capabilities, although their response
time will clearly be faster if these capabilities are built into the hardware,
rather than emulated. However, GKS defines only the interface as
seen by the application, and not the internal workstation interface.
This means that it is not possible to move workstation drivers
from one implementation of GKS to another.
There are difficulties in defining this interface because, if the
level is too high, then most workstation drivers must incorporate
a great deal of code. If on the other hand the level is too low,
then the software will not be able to make use of advanced features
in high-performance display systems.
There is thus a trade-off to be made, which is currently
decided upon by the designers of each graphics package.
The goal of the CGI \cite{bib-cgiref} is to standardize this interface.
However, it is proving to be a lengthy business,
because there are difficulties to persuade all the
parties involved to compromise.
It should be mentioned that one of the goals of CGI is to allow
the functionality of the graphics system to be split between multiple
CPUs. For example, the features defined by the CGI could either be
built into a graphics terminal, or programmed into a Personal Workstation.
This type of functionality has become loosely termed
{\it Networked Graphics}.
\index{networked graphics}
\index{X-Window}
In this realm CGI may be overtaken by events, because a networked graphics
system called {\it X-Window} is fast becoming a 'de facto' standard.
X-Window comes from MIT, and is an outcome of the Athena Project
financed by DEC and IBM.
\section{Overview of Basic Facilities available at CERN}
\index{MGKS}
\index{PLOT10/GKS}
The following graphics services and facilities are supported at CERN
and will be described in more detail within this manual.
Note that PLOT10/GKS and MGKS are no longer supported.
\subsection{GKSGRAL}
\index{GKSGRAL}
\index{GKSGRAL-3D}
Both GKS (2D) and GKS-3D libraries from GTS-GRAL ({\it GKSGRAL} and
{\it GKSGRAL-3D}) are available for general use
on IBM VM/CMS, VAX VMS, CRAY UNICOS, APOLLO AEGIS, and UNIX.
However, in the case of UNIX, compiled libraries are only available from CERN
for those machines on which they may be produced.
As of March, 1990, the PROduction GKSGRAL version
is 7.4/3.2, and the GKSGRAL-3D version is 2.0.
\subsection{Include Files}
\index{include files!gtsdev}
\index{include files!enum}
To aid program writing two {\it INCLUDE FILES} are available:
\begin{DLtt}{123456}
\item[GTSDEV]This file contains a set of parameters
defining the GKS Workstation Types available on GKSGRAL.
\item[ENUM]This file contains a set of parameters
defining the GKS Enumeration Types. {\bf It is highly recommended}
that these are used in preference to directly coding in collections of
integers, the meaning of which is immediately forgotten.
\end{DLtt}
Although the use of include files is not standard FORTRAN-77,
this facility is supported by the compilers on all the major CERN systems.
To produce standard FORTRAN code the text of the include file must be inserted
into the application program.
\subsection{GKS Metafile Display and Editing}
\index{GRVIEW}
\index{GRVIEW}
\index{GKSTV}
\index{GKSED}
\index{metafiles}
\index{editor}
GKS Appendix E metafiles may be interpreted on display screens
using the interactive command {\bf GRVIEW}, which is available on
IBM, VAX and APOLLO systems. GRVIEW also allows metafiles to be edited,
and combines features from the old commands GKSTV and GKSED, which
it replaces.
\subsection{Hardcopy and Conversion Facilities}
\index{metafiles}
\index{hardcopy}
\index{conversion}
\index{postscript}
\index{GRPLOT}
\index{GRCONV}
\index{GKSVT}
\index{GKSCP}
\index{GKSX87}
\index{GKS3812}
\index{GKSSGML}
\index{VERSATEC}
\index{XEROX}
\index{IBM3812}
GKS Appendix E metafiles may be interpreted onto a range of hardcopy
devices using the command {\bf GRPLOT}, or converted to
another format using {\bf GRCONV}. These two commands have replaced
the old utilities GKSVT, GKSCP, GKSX87, GKS3812, and GKSSGML.
GRCONV runs locally to convert a GKS Appendix~E metafile into a PostScript
file or a file of Tektronix 4014 escape sequences. The command also may
be used to convert a metafile into IBM~3812 format for inclusion
in documents produced by SGML.
GRPLOT plots a metafile on a device specified by a parameter.
Supported devices include the computer centre monochrome and colour
Versatec plotters, the Xerox 4050s, and the IBM~3812s.
Apart from Appendix~E metafiles, it is possible to produce PostScript
or Tektronix 4014 output files, either directly from GKS, via GRVIEW,
or via GRCONV.
PostScript files may be printed on any local PostScript printer,
although they are less versatile than a metafile and may not be edited or
plotted on any other type of hardcopy device. Various laser printers
accept files of Tektronix 4014 escape codes as input.
\subsection{Mainframe Graphics using PCs}
\index{PCs}
\index{Emulators}
\index{TeemTalk}
After evaluation of several graphics emulators, the TeemTalk package
from Pericom has been selected as being suitable to allow IBM compatible
PCs to be used as graphics terminals. TeemTalk emulates Tektronix
4107/4207 colour terminals which are supported by GKS on all CERN
time-sharing systems, and it is possible for CERN to order licences at
very favorable rates.
\Lit{$==>$} ??? Those people who would like to obtain a licence should contact
R.~Cailliau or F.~Ovett of AS-MI.
\subsection{Documentation}
\index{documentation}
Apart from this {\bf GKS/GKS-3D at CERN}, there exists
\Lit{$==>$} To complete ...
the {\bf GRVIEW, GRCONV and GRPLOT; Metafile Utility User's Guide}
\cite{bib-grref}, + ...,
all of which are available from the UCO.
Copies of the {\bf GKSGRAL Reference Manual} \cite{bib-gtsref} and the
{\bf GKSGRAL-3D Reference Manual} \cite{bib-gtsref3} may be borrowed
from the Computer Science Library, purchased from GTS-GRAL,
or purchased from CERN under an agreement with GTS-GRAL
(see Appendix on Page~\pageref{sec:gtsdist} for details).
{\bf Note that the GKSGRAL manuals are covered by copyright.}
\index{HELP}
\index{FIND}
On-Line help files are available using the {\bf FIND~xxx} command on
IBM, the {\bf HELP~xxx} command on VMS,
and via the command {\bf HELP~CERN~xxx} on APOLLO.
Keywords (xxx) are:
GRAPHICS, GKS, GRPLOT, GRCONV, GRVIEW, and GKSPACK.
\subsection{The User Consultancy Office (UCO)}
\index{UCO}
\index{User Consultancy Office}
General graphics inquiries should be directed to the User Consultancy Office
on the ground floor of Building~513 (Tel:~4952,
Electronic Mail: \Lit{[email protected]}).
\chapter{\protect\label{sec:gkspr}GKS Primer}
\section{Workstations}
\index{workstation}
\subsection{Definition}
GKS defines the concept of an abstract {\it graphical workstation}
which provide the logical interface through which the application
program controls the physical devices.
A workstation can have one {\it display surface} and/or a collection
of {\it logical input devices}.
Thus, a particular interactive workstation may
belong to one of three classes: Output-Only, Input-Only,
or Input-Output. (There are a further two classes of
workstation: Metafile-Input and Metafile-Output.)
GKS allows several workstations to be open simultaneously,
for example, one for an interactive graphics terminal
and one for a metafile or hardcopy device.
The actual maximum number of simultaneously open workstations is
installation dependent; in the GTS-GRAL implementation it is set to 15.
GKS allows the appearance of output primitives to vary between
workstations in order to take advantage of their differing
capabilities. However, it is possible to inquire at run-time what
the actual capabilities are, and to design one's code appropriately.
The inquiry functions are too numerous to list in this Primer,
and the reader is referred to one of the texts in the bibliography.
\subsection{Opening and Closing GKS}
Before any workstation is {\it opened} or {\it activated},
GKS itself must be {\it opened}.
This must be the first call in any GKS program
as it performs the initialization.
The last call should be to {\it close} (exit from) GKS.
\index{GKS routine!{\protect\tt GOPKS}}
\index{GKS routine!{\protect\tt GCLKS}}
\index{error file}
\begin{XMP}
CALL GOPKS(ERRFIL, BUF)
.......
CALL GCLKS
\end{XMP}
ERRFIL defines the FORTRAN logical unit number to which all error
messages will be sent. BUF is the amount of memory space to be
used. (This parameter is ignored by the GKSGRAL and GKSGRAL-3D implementations.)
\subsection{Opening and Closing a Workstation}
\index{workstation!opening/closing}
\index{workstation!identifier}
Each workstation is identified in the application program by a
unique number, the {\it workstation identifier}. This number is used
in routines which communicate with the device, i.e. when output is to be
sent to it, or when it is to be cleared, etc. In order
to establish a connection between the application program and
a workstation, the workstation has to be {\it opened}.
When the workstation is {\it closed},
the connection is released, segments stored in Workstation Dependent
Segment Storage are lost, and no further interactions are possible.
\index{GKS routine!{\protect\tt GOPWK}}
\index{GKS routine!{\protect\tt GCLWK}}
\index{connection identifier}
\index{workstation!connection identifier}
\index{workstation!type}
The calls to open and close a workstation are:
\begin{XMP}
CALL GOPWK(WKID, CONID, WTYPE)
and
CALL GCLWK(WKID)
\end{XMP}
The workstation identifier WKID is a positive integer which the
application chooses, and is typically a number like 1,2,3...
The integer CONID is the Connection Identifier which connects the
workstation to an I/O channel. It is usually a FORTRAN logical unit
number, but depends on the workstation and the host computer being used.
Thus, the value of CONID must be obtained from the
workstation-dependent documentation.
The {\it Workstation Type} (WTYPE) is an integer which specifies
which type of workstation should be opened,
e.g. a Tektronix 4014 or a Versatec plotter.
Workstation Types are implementation dependent.
A list of workstation identifiers in use at CERN will be found in the
appendix.
\subsection{Activating and Deactivating a Workstation}
\index{workstation!activation}
When primitives and attributes are output they are sent to all
{\it open} and {\it active} workstations.
When a workstation is {\it deactivated} no further
output is sent to it. At the end of the application program all open
workstations must be deactivated and closed before GKS itself can be closed.
\index{GKS routine!{\protect\tt GACWK}}
\index{GKS routine!{\protect\tt GDAWK}}
To activate or deactivate the workstation the calls are:
\begin{XMP}
CALL GACWK(WKID)
CALL GDAWK(WKID)
\end{XMP}
Note that Input-Only and Metafile-Input workstations may not be activated.
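Putting these calls together, a typical GKS program has the following overall
structure. This is only a sketch: the values of ERRFIL, BUF, WKID, CONID and
WTYPE are assumed to have been chosen as described above and in the
workstation-dependent documentation.
\begin{XMP}
C Open GKS, then open and activate one workstation
      CALL GOPKS(ERRFIL, BUF)
      CALL GOPWK(WKID, CONID, WTYPE)
      CALL GACWK(WKID)
C ... output primitives and attributes ...
C Deactivate and close the workstation, then close GKS
      CALL GDAWK(WKID)
      CALL GCLWK(WKID)
      CALL GCLKS
\end{XMP}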
\subsection{Clearing the Workstation}
\index{workstation!update}
The application has control over clearing the display
surface at any time by calling:
\index{GKS routine!{\protect\tt GCLRWK}}
\begin{XMP}
CALL GCLRWK(WKID, COFL)
\end{XMP}
The integer COFL controls the way in which the update happens.
If COFL is set to 'GALWAY' (see note at bottom of page 1),
then the workstation is cleared even if there was no previous output.
If COFL is set to 'GCONDI', then the drawing surface is cleared only
if there has been previous output.
Clearing the workstation deletes all WDSS segments stored on that
workstation (segments are described in Chapter on Page~\pageref{sec:seghdr}),
and on a hardcopy device the paper is advanced.
For metafile interpretation by the CERN utilities this
function is used to indicate 'end of picture'.
Note that the workstation is cleared automatically when
it is opened. Some implementations also clear the workstation when it is
closed, but this is not mandated in the standard.
\subsection{\protect\label{sec:defsta}Deferral States}
\index{deferral states}
GKS allows the programmer to {\it defer} sending output to a
workstation by calling the function Set Deferral State.
For example, it might be more efficient to send
primitives in batches, rather than one at a time.
Thus, 'As Soon As Possible' often may not be as {\it fast} as possible!
For most users of GKS the default values should be
acceptable (but note that these are device-dependent).
The call is:
\index{GKS routine!{\protect\tt GSDS}}
\begin{XMP}
CALL GSDS(WKID, DEFMOD, REGMOD)
\end{XMP}
The second argument, DEFMOD, controls when output should be sent to
the workstation specified by WKID, and can take the following values:
\begin{DLtt}{123456}
\item['GASAP']
send output As Soon As Possible
\item['GBNIG']
send output Before Next Interaction Globally. This makes sure
that the workstation is up-to-date before the next input action
on {\it any} open workstation
\item['GBNIL']
send output Before Next Interaction Locally. This makes sure
that the workstation is up-to-date before the next input action
on the workstation specified in the call
\item['GASTI']
At Some TIme allows GKS to choose some opportune moment to
bring the workstation up-to-date
\end{DLtt}
The third argument, REGMOD, controls what should happen on
those workstations which require the image on the display to be
regenerated in some circumstances.
For example, to delete a segment on a direct view storage tube display,
such as a TEKTRONIX 4014, would require the screen
to be cleared and then the remaining contents re-drawn.
\index{implicit regeneration}
This is called {\it implicit regeneration}.
REGMOD can take the following values:
\begin{DLtt}{123456}
\item['GSUPPD']
Re-drawing is suppressed until an update is forced by calling
the Update Workstation function, GUWK, or by re-drawing all the
segments with GRSGWK.
\item['GALLOW']
Immediate re-drawing is allowed.
\end{DLtt}
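For example, the following call (a sketch only; WKID is assumed to identify an
open workstation, and the mnemonic names are assumed to be available as
FORTRAN parameters, as in the other examples in this Primer) asks GKS to bring
the workstation up-to-date before the next input action on any workstation,
while suppressing implicit regeneration:
\begin{XMP}
C Defer output until the next input action anywhere, and
C suppress implicit regeneration of the picture
      CALL GSDS(WKID, GBNIG, GSUPPD)
\end{XMP}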
\subsection{Updating the Workstation}
Because of the deferral mode currently in force, not all output may yet
have been sent to the display. The routine
\index{workstation!update}
\index{GKS routine!{\protect\tt GUWK}}
\begin{XMP}
CALL GUWK(WKID, REGFL)
\end{XMP}
\index{non-retained data}
\index{implicit regeneration}
is provided to bring the contents of the workstation WKID up-to-date.
The argument REGFL can take the values 'GPOSTP' or 'GPERFO'.
If 'GPOSTP' is specified, then all deferred actions are performed
at some time.
If 'GPERFO' is specified, then all deferred actions are performed,
the workstation transformation is updated,
the display surface is cleared, and all segments are
re-drawn as necessary. If an implicit regeneration of the image
has to be performed then non-retained data (primitives not stored
in segments) will be lost.
Note that a call to GUWK will {\bf not} necessarily clear
the workstation and re-draw all segments from scratch.
However, this can be achieved by calling the function GRSGWK
(see section on Page~\pageref{sec:grsgwk}).
\subsection{Sending Messages/Prompts}
\index{messages}
\index{prompts}
In order to display messages and prompts on the display surface
the following routine is provided:
\index{GKS routine!{\protect\tt GMSG}}
\begin{XMP}
CALL GMSG(WKID, STR)
\end{XMP}
WKID specifies on which workstation the text string STR should appear.
Where and how the message is written out is both implementation
and workstation-dependent.
\section{\protect\label{sec:dprim}The Drawing Primitives}
\index{primitives!2D}
Drawing primitives are the basic elements of graphics output, such as
lines, text etc.
GKS supports the six output primitives which are described below:
{\it polyline, polymarker, fill area, text, cell array},
and the {\it generalised drawing primitive}.
Each primitive has a corresponding set of {\it attributes}
e.g. line type, colour index, character height, etc.
The appearance of an individual primitive is governed by the
attributes in force at the time when the primitive is passed to GKS,
and attribute setting will be described in the following chapter.
A number of example programs may be found in Appendix on Page~\pageref{sec:exmpref}
which illustrate the use of the following features.
\begin{figure}[h]
\caption{Examples of some GKS Primitives}
\label{fig:prims}
\end{figure}
\begin{note}
The Polyline, Polymarker, and Fill Area primitives are specified by an
array of points which may not be arbitrarily large, and the maximum
size of this array is implementation-dependent. The GKSGRAL
implementation at CERN restricts the number of points in a Fill Area to
300. However, calls to Polyline or Polymarker can handle more points
than this by internally sub-dividing the array.
\end{note}
\subsection{Polyline}
\index{polyline}
This draws line segments between two or more points using the
currently set attributes for line style, polyline colour index and
line width scale factor. The call is:
\index{GKS routine!{\protect\tt GPL}}
\begin{XMP}
CALL GPL(N, PXA, PYA)
\end{XMP}
where N is the number of points, and PXA(N), PYA(N) are real
arrays containing the X and Y values of the points in the
application program's own coordinate system (called the
{\it World Coordinates System}).
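As a minimal sketch (the coordinates are arbitrary values in World
Coordinates), the following draws a closed triangle by repeating the first
point at the end of the coordinate arrays:
\begin{XMP}
C Draw a triangle as a closed polyline
      REAL PXA(4), PYA(4)
      DATA PXA /0.1, 0.9, 0.5, 0.1/
      DATA PYA /0.1, 0.1, 0.8, 0.1/
      CALL GPL(4, PXA, PYA)
\end{XMP}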
\subsection{Polymarker}
\index{polymarker}
This marks a sequence of points with the marker symbol selected by the
currently set attributes for polymarker colour index, marker type and
marker size scale factor. The marker size to which the scale factor
is applied is workstation-dependent.
The call is:
\index{GKS routine!{\protect\tt GPM}}
\begin{XMP}
CALL GPM(N, PXA, PYA)
\end{XMP}
where N, PXA and PYA have the same meanings as for GPL.
\subsection{Text}
\index{text}
Text is displayed using the current attribute settings for text colour
index, text font and precision, character-up vector, text alignment,
character expansion factor, character spacing, text path and
character height. The call is:
\index{GKS routine!{\protect\tt GTX}}
\begin{XMP}
CALL GTX(PX, PY, STR)
\end{XMP}
where PX and PY are real numbers which define in World Coordinates the
starting position of the text string STR.
\subsection{Fill Area}
\index{fill area}
Fill Area draws an area which is specified by a polygon.
The interior of the polygon can be either not filled (hollow), filled
with a solid colour, or filled with various patterns.
The fill area style and colour are specified by the current setting of
the fill area colour index, fill area style and fill area style index
attributes. The call is:
\index{GKS routine!{\protect\tt GFA}}
\begin{XMP}
CALL GFA(N, PXA, PYA)
\end{XMP}
where N, PXA, PYA have the same meaning as for GPL and GPM.
If the first and last points are different, they are joined by a line.
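For illustration, the sketch below fills a square; since the first and last
points differ, GKS closes the polygon automatically. The call to GSFAIS,
which selects the solid interior style, is described in the next chapter,
and the coordinates are arbitrary values in World Coordinates.
\begin{XMP}
C Fill a square with the solid interior style
      REAL PXA(4), PYA(4)
      DATA PXA /0.2, 0.8, 0.8, 0.2/
      DATA PYA /0.2, 0.2, 0.8, 0.8/
      CALL GSFAIS(GSOLID)
      CALL GFA(4, PXA, PYA)
\end{XMP}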
\subsection{Cell Array}
\index{cell array}
The Cell Array is an array of rectangular cells with individual
colours. It serves for passing raster images to GKS.
Note that in general, colour values within the Cell Array do not
correspond to picture elements (pixels) on a hardware display,
and that all primitives, including Cell Arrays, are subject to
the usual GKS transformations.
The call is:
\index{GKS routine!{\protect\tt GCA}}
\begin{XMP}
CALL GCA(PX,PY,QX,QY,DIMX,DIMY,ISC,ISR,DX,DY,COLIA)
\end{XMP}
Neither the Cell Array primitive nor the function call arguments will be
described in detail in this Primer, and the reader is referred to the GKSGRAL
manual, or one of the texts in the bibliography, for more information.
\subsection{Generalised Drawing Primitive}
\index{generalised drawing primitive}
\index{GDP}
Although at present GKS defines only six types of graphics primitives,
the standard allows for these to be augmented within particular
implementations by using {\it Generalised Drawing Primitives} (GDPs).
Thus, the Generalised Drawing Primitive may be thought
of as a 'standard way to be non-standard', and in fact a mechanism
exists for implementors of GKS to register GDPs with the International
Standards Organization group responsible for GKS.
The intention is that these registered GDPs will be incorporated as new
primitives within future revisions of the standard.
However, note that the use of GDPs may make programs
{\bf non-portable to other GKS implementations}. On the other
hand, they do permit the GKS driver to make use of special hardware
features, circle drawing for example, if these features are available.
GKSGRAL supports 6 GDPs:
\index{circle GDP}
\index{arc GDP}
\index{ellipse GDP}
\index{Bezier curve GDP}
\index{cubic curve GDP}
\begin{OL}
\item Circle
\item Circular Arc
\item Ellipse
\item Elliptical Arc
\item Bezier Curve defined by Bezier Polygon
\item Cubic Curve defined by Interpolation Points
\end{OL}
The call is:
\index{GKS routine!{\protect\tt GGDP}}
\begin{XMP}
CALL GGDP(N, PXA, PYA, PRIMID, IDR, DATREC)
\end{XMP}
See the GKSGRAL Manual for more details.
\section{\protect\label{sec:attrbs}The Output Attributes}
\index{attributes}
Before actually outputting any primitives, the application will want to
specify the exact way in which the drawing appears on the display
surface. This {\it rendering} process is controlled
by {\it output attributes}.
Output primitives have geometric and non-geometric attributes.
Geometric attributes, such as the character height,
affect the size and shape of a primitive, whereas non-geometric
attributes are qualities such as colour, line style, etc.
Output attributes affect the appearance or rendering of primitives at the
moment when the primitives are sent to GKS, and attributes are said to be
{\it bound} to primitives at this time.
Thus, modification of an attribute has no effect on primitives which
have been output already.
GKS attributes are said to be {\it modal} in character because,
after setting an attribute, GKS is in a mode in which the value of that
attribute will be bound to all primitives of the appropriate type which
follow. Setting a different value for the attribute would then
change the mode.
\subsection{Attribute Bundles}
\index{bundled attributes}
\index{individual attributes}
\index{bundle index}
There are two ways of specifying attributes:
{\it Bundled} and {\it Individually}.
Attributes may be set individually by calling the appropriate routines
one at a time. As an example, for a polyline one could set the
line style, the line width, and the colour by calling the routines
GSLN, GSLWSC, and GPLCI, before calling GPL to output the polyline.
Setting attributes individually will ensure that
the primitives to which they are bound appear the same on all
workstations, assuming that the workstations have sufficient capability.
However, if attributes are set using bundles, then the results will
be workstation-dependent.
Bundled attributes are assigned by selecting a {\it bundle index}
for a particular type of primitive using the routines given in section
on Page~\pageref{sec:setbnd}. The bundle index points to an entry in the
appropriate workstation {\it bundle table}.
Hence, each workstation has a bundle table for every primitive type,
and each entry in a bundle table contains a pre-defined set of
attributes for that particular primitive.
For example, the first entry in the polyline bundle table may contain
the attributes {\it dotted} and {\it red}, the second may
contain {\it solid, double thickness} and {\it blue}, etc.
Note that attribute bundles do
{\bf not} contain geometric attributes.
Beginners are advised to ignore attribute bundles and to set each
attribute individually. However, read the next section on
Aspect Source Flags before trying out a program.
As an example of why one might make use of attribute bundles,
consider an application which sometimes uses a colour terminal and
sometimes a monochrome one.
By drawing polylines with, say, bundle table index 5, the actual
appearance of the polylines will depend on the contents of
polyline bundle 5 on the two workstations. Thus, the application
can arrange to distinguish the polylines by using a particular
colour on the colour terminal, and a particular dash pattern
on the monochrome terminal, without making changes to the body of the
code which draws the primitives.
By using attribute bundles to specify attributes, and assuming that
the primitives have been stored in segments
(segments are described in Chapter on Page~\pageref{sec:seghdr}),
the application can also change the appearance of primitives
{\it after} they have been output to a workstation by
re-defining the contents of the bundle table.
This effect cannot be achieved if the
attributes are set individually without deleting and re-drawing
the primitives.
\subsubsection{Aspect Source Flags}
\index{ASFs}
\index{aspect source flags}
To control whether a primitive attribute should be set individually,
or using a bundle table index, each primitive has a set of attributes
called the {\it Aspect Source Flags} (ASFs);
one flag for each primitive attribute.
If the ASF for a particular attribute is set to 'GBUNDL',
then the primitive will be bound to the attributes in the bundle table
entry pointed to by the bundle index currently in force.
If the ASF for a particular attribute is set to 'GINDIV',
then the primitive will be bound to the current individual attribute values
for that type of primitive.
Unfortunately, the committee which designed GKS could not agree on
whether the default setting for the ASFs should be bundled or individual.
Thus, American implementations, such as PLOT10/GKS, tend to default
the ASFs to set attributes individually, whilst European implementations,
such as GKSGRAL, tend to default ASFs to bundled attributes.
In order to be safe, {\bf users of GKS are advised to set their own default
values for the ASFs} as soon as they open GKS.
This can be done by calling:
\index{GKS routine!{\protect\tt GSASF}}
\begin{XMP}
CALL GSASF(ASFLST)
\end{XMP}
where ASFLST is an array of 13 integers, one for each attribute,
which must be set to 'GBUNDL' or 'GINDIV'.
The ASF number for each attribute (i.e. its position in ASFLST)
is given in parentheses in the attribute list below.
An example program which calls GSASF may be
found in Appendix on Page~\pageref{sec:expfa}.
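As a further illustration, the following sketch sets all thirteen flags to
individual; it should be called as soon as GKS has been opened, and it assumes
that the mnemonic GINDIV is available as a FORTRAN parameter, as in the other
examples in this Primer.
\begin{XMP}
C Force all 13 Aspect Source Flags to 'individual'
      INTEGER ASFLST(13), I
      DO 10 I = 1, 13
         ASFLST(I) = GINDIV
   10 CONTINUE
      CALL GSASF(ASFLST)
\end{XMP}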
\subsubsection{Defining Attribute Bundles}
It is possible to define the entries in the bundle tables by calling one
of the following routines:
\index{GKS routine!{\protect\tt GSPLR}}
\index{GKS routine!{\protect\tt GSPMR}}
\index{GKS routine!{\protect\tt GSTXR}}
\index{GKS routine!{\protect\tt GSFAR}}
\index{GKS routine!{\protect\tt GSPAR}}
Set {\bf polyline} representation:
\begin{XMP}
GSPLR(WKID, PLI, LN, LWSC, PLCI)
\end{XMP}
Set {\bf polymarker} representation
\begin{XMP}
GSPMR(WKID, PMI, MK, MKSC, PMCI)
\end{XMP}
Set {\bf text} representation
\begin{XMP}
GSTXR(WKID, TXI, TXF, TXP, CHXP, CHSP, TXCI)
\end{XMP}
Set {\bf fill area} representation
\begin{XMP}
GSFAR(WKID, FAI, FAIS, FASI, FACI)
\end{XMP}
Set {\bf pattern} representation
\begin{XMP}
GSPAR(WKID, PAI, DIMX, DIMY, NCS, NRS, DX, DY, PACI)
\end{XMP}
As arguments, each routine requires the workstation identifier (WKID)
and bundle index (PLI, etc.) to be set, plus a value for each of the
non-geometric attributes for that particular primitive,
which are listed below.
Details of GSPAR will not be given in this Primer; see the GKSGRAL manual
or one of the references in the bibliography for more information.
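As an example, the sketch below defines polyline bundle 5 on workstation WKID
to be a dashed line of twice the nominal width drawn with colour index 2
(assumed to have been defined via GSCR), and then selects that bundle for
subsequent polylines:
\begin{XMP}
C Define and select polyline bundle 5
      CALL GSPLR(WKID, 5, GLDASH, 2.0, 2)
      CALL GSPLI(5)
\end{XMP}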
\subsection{\protect\label{sec:attlst}The List of GKS Attributes}
\index{attributes!list of}
\subsubsection{Individual Attributes}
\begin{UL}
\item {\bf POLYLINE}
\index{polyline}
\begin{DLtt}{123456}
\item[LN]
\index{GKS routine!{\protect\tt GSLN}}
(integer) the polyline line style (ASF 1). Set by GSLN(LN).
\item[LWSC]
\index{GKS routine!{\protect\tt GSLWSC}}
(real) the line width scale factor (ASF 2). Set by GSLWSC(LWSC).
\item[PLCI]
\index{GKS routine!{\protect\tt GSPLCI}}
(integer) the polyline colour index (ASF 3). Set by GSPLCI(PLCI).
\end{DLtt}
\item {\bf POLYMARKER}
\index{polymarker}
\begin{DLtt}{123456}
\item[MK]
\index{GKS routine!{\protect\tt GSMK}}
(integer) the polymarker type (ASF 4). Set by GSMK(MK).
\item[MKSC]
\index{GKS routine!{\protect\tt GSMKSC}}
(real) the marker size scale factor (ASF 5). Set by GSMKSC(MKSC).
\item[PMCI]
\index{GKS routine!{\protect\tt GSPMCI}}
(integer) the polymarker colour index (ASF 6). Set by GSPMCI(PMCI).
\end{DLtt}
\item {\bf TEXT}
\index{text}
\begin{DLtt}{123456}
\item[TXF]
\index{GKS routine!{\protect\tt GSTXFP}}
(integer) the text font (ASF 7). Set by GSTXFP(TXF, TXP).
\item[TXP]
(enumerated) the text precision (ASF 7). Set by GSTXFP(TXF, TXP).
\item[CHXP]
\index{GKS routine!{\protect\tt GSCHXP}}
(real) the character expansion factor (ASF 8). Set by GSCHXP(CHXP).
\item[CHSP]
\index{GKS routine!{\protect\tt GSCHSP}}
(real) the character spacing (ASF 9). Set by GSCHSP(CHSP).
\item[TXCI]
\index{GKS routine!{\protect\tt GSTXCI}}
(integer) the text colour index (ASF 10). Set by GSTXCI(TXCI).
\item[CHUP]
\index{GKS routine!{\protect\tt GSCHUP}}
(real) the character up vector. Set by GSCHUP(CHUX, CHUY).
\item[TXAL]
\index{GKS routine!{\protect\tt GSTXAL}}
(enumerated) the text alignment. Set by GSTXAL(TXALH, TXALV).
\item[TXP]
\index{GKS routine!{\protect\tt GSTXP}}
(enumerated) the text path. Set by GSTXP(TXP).
\item[CHH]
\index{GKS routine!{\protect\tt GSCHH}}
(real) the character height. Set by GSCHH(CHH).
\end{DLtt}
\item {\bf FILL AREA}
\index{fill area}
\begin{DLtt}{123456}
\item[FAIS]
\index{GKS routine!{\protect\tt GSFAIS}}
(enumerated) the fill area interior style (ASF 11). Set by GSFAIS(FAIS).
\item[FASI]
\index{GKS routine!{\protect\tt GSFASI}}
(integer) the fill area style index (ASF 12). Set by GSFASI(FASI).
\item[FACI]
\index{GKS routine!{\protect\tt GSFACI}}
(integer) the fill area colour index (ASF 13). Set by GSFACI(FACI).
\end{DLtt}
\item {\bf PATTERN}
\begin{DLtt}{123456}
\index{pattern}
\item[PA]
\index{GKS routine!{\protect\tt GSPA}}
(real) the pattern size. Set by GSPA(PASZX, PASZY).
\item[PARF]
\index{GKS routine!{\protect\tt GSPARF}}
(real) the pattern reference point. Set by GSPARF(RFX, RFY).
\end{DLtt}
\end{UL}
\subsubsection{\protect\label{sec:setbnd}Bundled Attributes}
\begin{UL}
\item {\bf POLYLINE}
\index{polyline}
\begin{DLtt}{123456}
\item[PLI]
\index{GKS routine!{\protect\tt GSPLI}}
(integer) the polyline bundle index. Set by GSPLI(PLI).
\end{DLtt}
\item {\bf POLYMARKER}
\index{polymarker}
\begin{DLtt}{123456}
\item[PMI]
\index{GKS routine!{\protect\tt GSPMI}}
(integer) the polymarker bundle index. Set by GSPMI(PMI).
\end{DLtt}
\item {\bf TEXT}
\index{text}
\begin{DLtt}{123456}
\item[TXI]
\index{GKS routine!{\protect\tt GSTXI}}
(integer) the text bundle index. Set by GSTXI(TXI).
\end{DLtt}
\item {\bf FILL AREA}
\index{fill area}
\begin{DLtt}{123456}
\item[FAI]
\index{GKS routine!{\protect\tt GSFAI}}
(integer) the fill area bundle index. Set by GSFAI(FAI).
\end{DLtt}
\end{UL}
\subsection{Specifying Line Styles For Polylines}
\index{polyline styles}
It is possible to draw polylines {\it solid, dashed, dotted}
or {\it dashed-dotted} ('GLSOLI', 'GLDASH', 'GLDOT', 'GLDASD').
For example:
\index{GKS routine!{\protect\tt GSLN}}
\begin{XMP}
CALL GSLN(GLDASH) - sets the line style to dashed
\end{XMP}
The different line styles available are shown in \ref{fig:linstyl}.
\begin{figure}[h]
\caption{GKS line styles}
\label{fig:linstyl}
\end{figure}
It is also possible to specify a scale factor which modifies the nominal
width of lines on the workstation. For example:
\index{GKS routine!{\protect\tt GSLWSC}}
\begin{XMP}
CALL GSLWSC(2.0)
\end{XMP}
should double the nominal line width. Note, however, that this (Real) parameter
is implementation-dependent, and may be ignored by terminal drivers as it
is both difficult and expensive to emulate in software if the device
does not support the feature in hardware.
\subsection{Specifying Marker Types for Polymarkers}
\index{polymarker types}
The five GKS Marker types, {\it point, plus, asterisk, circle}
and {\it cross} ('GPOINT', 'GPLUS', 'GAST', 'GOMARK', 'GXMARK'),
are demonstrated in \ref{fig:mark}.
For example:
\index{GKS routine!{\protect\tt GSMK}}
\begin{XMP}
CALL GSMK(GPOINT) - sets the marker type to point
\end{XMP}
\begin{figure}[h]
\caption{GKS marker types}
\label{fig:mark}
\end{figure}
Markers may be scaled in size by calling GSMKSC. For example:
\index{GKS routine!{\protect\tt GSMKSC}}
\begin{XMP}
CALL GSMKSC(3.5)
\end{XMP}
will scale the following markers by 3.5 times. The implementation of this
function is workstation-dependent. In particular, markers drawn on terminals
by hardware may only exist in certain fixed sizes.
\subsection{Specifying Fill Areas}
\index{fill area}
\index{hatching}
Many applications need shaded or coloured areas as well as lines and points.
GKS allows one to draw such an area by specifying an array of points
representing a closed polygon. If the last point in the array is not the
same as the first, these two will be joined.
The interior style of the area can be set to one of the four values
{\it hollow, solid, pattern}, and {\it hatch},
which are demonstrated in \ref{fig:fill}
('GHOLLO', 'GSOLID', 'GPATTR', 'GHATCH').
Examples for setting fill area interior style are:
\index{GKS routine!{\protect\tt GSFAIS}}
\begin{XMP}
CALL GSFAIS(GHOLLO)
or
CALL GSFAIS(GSOLID)
\end{XMP}
For interior style {\it hollow}, only the boundary polygon is drawn, as a solid line.
For style {\it solid} the interior is completely filled with a uniform
colour, as specified by the fill area colour index set by calling GSFACI.
\index{GKS routine!{\protect\tt GSFACI}}
Workstations for devices which support area filling of polygons by hardware
should normally make use of this feature.
However, this is not always possible, as some monochrome terminals do not
use the correct algorithm to perform the area fill.
For interior styles {\it hatch} and {\it pattern},
the particular hatch algorithm or pattern used may be chosen by specifying
a fill area style index.
This represents a second level of selection on the way the area is filled,
and the index points into either a hatch or pattern table stored at the
workstation. Thus, hatches and patterns are workstation-dependent.
The fill area style index is set by calling:
\index{GKS routine!{\protect\tt GSFASI}}
\begin{XMP}
CALL GSFASI(FASI)
\end{XMP}
where FASI is an integer value. To find out the effect of setting a particular
index, it is necessary to consult the workstation documentation.
\index{GKS routine!{\protect\tt GUSHTR}}
\index{GKS routine!{\protect\tt GSPARF}}
\index{GKS routine!{\protect\tt GSPAR}}
\index{GKS routine!{\protect\tt GSPA}}
The GKS standard provides calls to modify patterns by setting the pattern
reference point (GSPARF) and pattern size (GSPA).
The pattern representation attribute bundle is set using GSPAR.
The GKSGRAL package also allows the possibility to define one's
own hatch styles by calling the routine GUSHTR,
although this is not a standard GKS feature.
None of these routines will be described further in this Primer,
and the reader is referred to the texts in the bibliography
for more information.
\begin{figure}[h]
\caption{GKS fill area styles}
\label{fig:fill}
\end{figure}
\subsection{Specifying Text}
\index{text}
\index{converting numeric variables}
\index{numeric variables}
Text is the most complex of the GKS primitives because there are
so many different variations. The text attributes are described below.
Note that only the non-geometric attributes may be set in the text
bundle table.
({\bf HINT:} For FORTRAN programmers, it is possible to use the
{\it Internal Write} construct to convert numeric variables to character
strings for output as text primitives, see section on Page~\pageref{sec:refintw}).
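For example, the following sketch (the format and text position are arbitrary)
converts a real value into a character string with an internal WRITE before
outputting it with GTX:
\begin{XMP}
C Convert a REAL value to text and display it
      CHARACTER*16 STR
      REAL VALUE
      VALUE = 12.345
      WRITE(STR, '(F8.3)') VALUE
      CALL GTX(0.1, 0.5, STR)
\end{XMP}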
\subsubsection{Font and Precision}
\index{text!fonts}
\index{text!precision}
\index{string precision text}
\index{character precision text}
\index{stroke precision text}
The {\it text font} is specified by an integer font number,
and fonts are workstation-dependent.
Any text font which is not supported on a given workstation is defaulted
to 1. This text font is a simple, roman type font produced by stroking
out each character using line segments.
To find out which fonts are provided by a particular implementation
one should consult the relevant reference manual.
The {\it text precision} attribute determines how closely the chosen
font has to obey the specifications of the other text attributes.
The choices for text precision are:
{\it string, char} and {\it stroke}
('GSTRP', 'GCHARP', 'GSTRKP').
In {\it string} precision the text string should be placed as close
as possible to the position specified by the primitive call.
None of the other text attributes need be taken into account
except the approximate size specified by character height.
Thus, if string precision is specified, the implementation is usually free
to make use of hardware characters which can be drawn much faster than
generating the fonts in software.
If {\it char} precision together with a workstation-provided font is
chosen, GKS will try a workstation dependent approximation.
In {\it stroke} precision GKS has to follow precisely the attribute
specifications, and always defaults to font 1
if the requested font does not exist.
Note that an implementation of GKS {\it may} provide stroke precision
text on a particular workstation even if the application specified only
a lower precision.
Both the text font and the text precision are set by the same routine:
\index{GKS routine!{\protect\tt GSTXFP}}
\begin{XMP}
CALL GSTXFP(FONT, PREC)
\end{XMP}
\begin{figure}[h]
\caption{Text fonts, precisions and up vectors}
\label{fig:fonts}
\end{figure}
\subsubsection{Expansion Factor}
\index{character!expansion factor}
The character expansion factor, a real number, causes each character to
appear 'fatter' or 'thinner' than normal. The default value is 1.0.
The height of the character is not affected, nor is the space
between the characters.
\index{GKS routine!{\protect\tt GSCHXP}}
\begin{XMP}
CALL GSCHXP(CHXP)
\end{XMP}
\subsubsection{Spacing}
\index{character!spacing}
The character spacing attribute defines the amount of extra
blank space to be inserted between adjacent characters of a text string.
This is a real number defined as a fraction of the character height.
The default value is 0.0. A positive character spacing spreads the
letters out, a negative one makes the letters overlap.
\index{GKS routine!{\protect\tt GSCHSP}}
\begin{XMP}
CALL GSCHSP(CHSP)
\end{XMP}
\subsubsection{Character-Up-Vector}
\index{character!up-vector}
The char-up vector defines the orientation of the text.
The text is written from left to right along a line perpendicular to the
char-up vector, which is specified by its X and Y components
(two real numbers):
\index{GKS routine!{\protect\tt GSCHUP}}
\begin{XMP}
CALL GSCHUP(CHUX, CHUY)
\end{XMP}
The effect of choosing different combinations of text font,
text precision and character-up-vector is shown in \ref{fig:fonts}.
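For example, the following sketch should write a string running vertically up
the page by pointing the character-up vector along the negative X axis (the
string and its position are arbitrary):
\begin{XMP}
C Write text rotated through 90 degrees
      CALL GSCHUP(-1.0, 0.0)
      CALL GTX(0.1, 0.1, 'VERTICAL TEXT')
\end{XMP}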
\subsubsection{Alignment}
\index{text!alignment}
The text string as a whole is positioned relative to the point specified
in the GTX primitive call according to the current setting of the text
alignment attribute. The call to GSTXAL has two parameters for
horizontal and vertical alignment.
\index{GKS routine!{\protect\tt GSTXAL}}
\begin{XMP}
CALL GSTXAL(TXALH, TXALV)
\end{XMP}
The horizontal alignment is either {\it normal, left edge, centre},
or the {\it right edge} of the text string
('GAHNOR', 'GALEFT', 'GACENT', 'GARITE').
The vertical alignment is either {\it normal, top, cap line, half,
base line}, or {\it bottom}
('GAVNOR', 'GATOP', 'GACAP', 'GAHALF', 'GABASE', 'GABOTT').
The alignment attribute is illustrated in \ref{fig:align}.
The selection of {\it normal} for either horizontal or vertical
alignment results in GKS choosing the most appropriate value
depending on the current Text Path.
\begin{figure}[h]
\caption{Text alignment}
\label{fig:align}
\end{figure}
\subsubsection{Text Path}
\index{text!path}
The text path describes the direction in which the text string is written.
The possible values are {\it Right, Left, Up} and {\it Down}
('GRIGHT', 'GLEFT', 'GUP', 'GDOWN').
The default for the text path is perpendicular to the up vector (i.e. 'GRIGHT').
\index{GKS routine!{\protect\tt GSTXP}}
\begin{XMP}
CALL GSTXP(TXP)
\end{XMP}
\subsubsection{Height}
\index{character!height}
The character height is set in {\bf World Coordinates} by calling
GSCHH. Increasing the height automatically causes a corresponding
increase in the width of the character.
\index{GKS routine!{\protect\tt GSCHH}}
\begin{XMP}
CALL GSCHH(CHH)
\end{XMP}
Note that the use of World Coordinates to specify the character height may
cause problems if the normalization transformation chosen has a very large
or very small range in Y. In this case it is possible that when using a
metafile as an output workstation the number of digits required to
specify the character height may be more than the range used to define
real numbers within the metafile. If it would be inconvenient to modify
the range of World Coordinate Y values, then another possible solution to
the problem is to superimpose two viewports, one on top of the other.
All characters may then be drawn on one of the viewports which has been
defined using a more reasonable normalization transformation.
\subsection{Specifying Colour}
\index{colour!index}
Colour is specified separately for each primitive type by a colour index value.
\index{GKS routine!{\protect\tt GSPLCI}}
\begin{DLtt}{123456}
\item[Polyline]CALL GSPLCI(PLCI)
\item[Polymarker]CALL GSPMCI(PMCI)
\item[Text]CALL GSTXCI(TXCI)
\item[Fill Area]CALL GSFACI(FACI)
\end{DLtt}
The {\it colour index}, instead of defining a colour directly, points to
an entry in a {\it colour look-up table} which is workstation-dependent.
If the colour is an attribute of a primitive, then one can specify a colour
index either as an individual attribute or as part of a bundle table.
The size of the colour table is workstation-dependent, but
the table always contains the entries 0 and 1.
The background colour is 0, and the default foreground colour is 1.
Each entry greater than 1 defines an additional foreground colour.
If a colour index greater than the maximum is
specified, then the default value 1 is taken, which ensures that a program
written using colour will run on a monochrome device.
\index{colour!look-up table}
To set entries in the colour look-up table
one must call the function Set Colour Representation (GSCR).
It takes as parameters the workstation identifier, the colour index
to be set, and a value (from 0.0 to 1.0) for each of the red, green, and
blue intensities. The workstation then uses the closest available
colour to that which has been requested.
\index{GKS routine!{\protect\tt GSCR}}
\begin{XMP}
CALL GSCR(WKID, INDEX, RED, GREEN, BLUE)
\end{XMP}
The highest possible value for the colour index (NCOLI)
depends on the capabilities of the device and can be inquired using
GQCF and specifying the workstation type:
\index{GKS routine!{\protect\tt GQCF}}
\begin{XMP}
CALL GQCF(WTYPE, ERRIND, NCOLI, COLA, NPCI)
\end{XMP}
The parameter COLA ('GMONOC' or 'GCOLOR') indicates whether or not colour
is available, and NPCI specifies the number of pre-defined colour indices.
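As a simple sketch, the following defines colour index 2 to be red on
workstation WKID and selects it for subsequent polylines:
\begin{XMP}
C Define colour index 2 as red and use it for polylines
      CALL GSCR(WKID, 2, 1.0, 0.0, 0.0)
      CALL GSPLCI(2)
\end{XMP}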
The colour index can also be used if part of a previously drawn picture
is to be erased. The part to be erased should be re-drawn in
the background colour (index=0).
All other attribute settings and transformations must be
exactly as they were when the picture was produced initially.
However, even in this case, the trick may not work if the primitive
being erased overlays another primitive, and not just the background.
Colour may be part of a fill area pattern, in which case different parts
of the pattern may be defined to be different colours.
For the cell array primitive an array of colours is specified
as part of the primitive itself.
\section{Coordinates and Transformations}
\index{transformations}
\index{coordinate systems}
\index{world coordinates}
\index{normalized device coordinates}
\index{device coordinates}
GKS defines three coordinate systems:
\begin{OL}
\item WORLD Coordinates (WC)
\item NORMALIZED DEVICE Coordinates (NDC)
\item DEVICE Coordinates (DC)
\end{OL}
The application program specifies the coordinates of points in primitive
calls using a cartesian coordinate system with whichever scale
is the most convenient.
This system is called the {\it World Coordinate System}.
The WC range can go from negative to positive infinity along both axes,
which gives the application an unlimited choice of coordinates.
The definition of the WC space is independent of any graphics
output device and can be different for each application as the
requirements dictate.
All world coordinates are transformed by GKS to a 'virtual' device
space, the {\it Normalized Device Coordinate} (NDC) space.
The NDC space ranges from 0 to 1 in both x and y.
This space is device independent and appears identical for all
workstations in the system. A transformation from WC to NDC is called a
{\it Normalization Transformation}.
GKS allows more than one such transformation to be defined,
and so an application may draw each part of a picture in
its own WC system, and then map them into a single NDC space.
See \ref{fig:windows} and \ref{fig:trafos}.
The third coordinate system is that of the hardware device.
GKS uses specific information about the graphics output device
to transform normalized device coordinates into
coordinates that the device itself understands. Usually the units
in the device dependent space will be metres or raster units.
The translation from NDC to DC is called the
{\it Workstation Transformation}.
\index{window}
\index{viewport}
\index{normal!transformation}
\index{transformation!normalization}
\subsection{\protect\label{sec:nortrn}Normalization Transformation}
This transformation is specified by mapping a {\it Window}
in the application's WC space to a {\it Viewport} in NDC space.
Both the WC window and the NDC viewport are rectangles parallel to the
x,y axes.
They are specified by calls to Set WiNdow and Set ViewPort:
\index{GKS routine!{\protect\tt GSWN}}
\index{GKS routine!{\protect\tt GSVP}}
\begin{XMP}
CALL GSWN (TNR, WXMIN, WXMAX, WYMIN, WYMAX)
CALL GSVP (TNR, VXMIN, VXMAX, VYMIN, VYMAX)
\end{XMP}
\index{aspect ratio}
\index{TNR}
Because there may be several Normalization Transformations, the integer
value TNR indicates to which one the call applies.
The other values are real numbers defining the {\it bottom left} and
{\it top right} corners of the two rectangles. The window is
specified in the WC system, and the viewport in the NDC system. If the
x and y sides of the window and the viewport do not have the same
{\it Aspect Ratio} a distortion effect will take place.
This may or may not be intentional!
As has been mentioned, it is possible to generate a complex picture
with different world coordinates for different parts of the image,
or to place several pictures on different areas of the screen.
This is achieved by specifying multiple normalization transformations.
Each transformation is assigned a transformation number (TNR) for which
the range is 0 to some implementation-dependent maximum (usually about 20).
Thus, the transformation corresponding to each number
specifies a separate window-to-viewport mapping.
If the viewports overlap then their contents will be superimposed.
The default transformation number is 0, for which the WC and NDC
coordinate systems are the same.
Transformation 0 may not be modified, and so its use
is always equivalent to calling:
\begin{XMP}
CALL GSWN (TNR, 0.0, 1.0, 0.0, 1.0)
CALL GSVP (TNR, 0.0, 1.0, 0.0, 1.0)
\end{XMP}
The application specifies which normalization transformation is
active at a given time by a call to GSELNT (SELect Normalization
Transformation):
\index{GKS routine!{\protect\tt GSELNT}}
\begin{XMP}
CALL GSELNT(TNR)
\end{XMP}
All primitives created subsequently will be transformed according to this
transformation until a different one is selected.
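As an illustration (the coordinate ranges are arbitrary), the sketch below
defines two normalization transformations which map two different World
Coordinate systems onto separate areas of NDC space:
\begin{XMP}
C Two WC systems mapped side by side into NDC space
      CALL GSWN (1, 0.0, 100.0, 0.0, 100.0)
      CALL GSVP (1, 0.0, 0.5, 0.0, 0.5)
      CALL GSWN (2, -1.0, 1.0, -1.0, 1.0)
      CALL GSVP (2, 0.5, 1.0, 0.0, 0.5)
      CALL GSELNT(1)
C ... primitives here are given in the range 0.0 to 100.0 ...
      CALL GSELNT(2)
C ... primitives here are given in the range -1.0 to +1.0 ...
\end{XMP}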
\begin{figure}[h]
\index{window}
\index{viewport}
\caption{Windows and Viewports}
\label{fig:windows}
\end{figure}
\subsection{\protect\label{sec:wstntfm}Workstation Transformation}
\index{workstation!transformation}
\index{transformation!workstation}
The method of defining which portion of NDC space is to appear on
a specific workstation is similar to the way a viewport is positioned
in NDC space (the Normalization Transformation). The {\it Workstation
Transformation} defines that part of NDC space which will be visible,
and where it will appear on the display surface.
It is set by calling the functions Set WorKstation WiNdow and
Set WorKstation ViewPort:
\index{GKS routine!{\protect\tt GSWKWN}}
\index{GKS routine!{\protect\tt GSWKVP}}
\index{window}
\index{viewport}
\index{workstation!window}
\index{workstation!viewport}
\begin{XMP}
CALL GSWKWN(WKID, WXMIN, WXMAX, WYMIN, WYMAX)
CALL GSWKVP(WKID, VXMIN, VXMAX, VYMIN, VYMAX)
\end{XMP}
Set Workstation Window specifies in NDC coordinates the area of the NDC
space to be output to the device. Set Workstation Viewport specifies in
{\it Display Coordinates} (DC) where on the device the window will appear.
These functions may be called at any time. However, whether or not
the effect is immediately visible is workstation-dependent.
For example, if changing the workstation transformation requires an
implicit image regeneration, then the timing of when it will take place
is affected by the current deferral mode (see section on Page~\pageref{sec:defsta}).
\index{implicit regeneration}
\index{deferral states}
\index{aspect ratio}
The aspect ratio for the workstation window and the workstation viewport
{\bf must always be the same}.
If they are not, then {\bf the specified transformation is ignored},
and the complete workstation window is displayed on the device
in the correct aspect ratio.
Instead, a rectangle is used whose bottom left corner coincides with the
bottom left corner of the specified viewport and which is as large as possible.
The {\bf default setting} for the workstation transformation is
to map the whole unit square of NDC onto the largest square possible for the
workstation. For rectangular displays, this means that not all of
the display surface is used. Conversely, if the workstation
transformation is set so that all of a rectangular display is
used, then either: (1) the normalization transformation can not
preserve the aspect ratio, or: (2) not all of NDC space can be used.
\index{GKS routine!{\protect\tt GQDSP}}
\index{GKS routine!{\protect\tt GQMDS (see GQDSP)}}
To inquire the range of device coordinates corresponding to a particular
workstation type, it is possible to call the function GQDSP
(inQuire Display SPace). (Note: The 3D version is called GQDVOL.)
\begin{XMP}
CALL GQDSP (WTYPE, ERRIND, DCUNIT, RX, RY, LX, LY)
\end{XMP}
The routine returns the units in which the display surface
is measured (DCUNIT), and also the maximum x and y values
in each direction.
Some devices, for example hardcopy plotters, are measured in metres
(DCUNIT='GMETRE'), so the routine will return the actual size of the
device. Other devices, for example graphics terminals,
will be defined in raster or some other units (DCUNIT='GOTHU').
RX, RY are real device coordinates,
and LX, LY are in integer raster units.
As an example of how to use this routine, below is a fragment of code
which allows an application to draw on the whole of a rectangular
display surface with a 'Landscape' aspect ratio
(width~>~height).
See also the example in Appendix on Page~\pageref{sec:ex3dv}.
\begin{XMP}
C Inquire the Workstation Display Surface Size
CALL gqdsp (wtype, errind, dcunit, rx, ry, lx, ly)
C
C Set the Workstation Window and Viewport (assume x > y)
CALL gswkwn(wkid, 0.0, 1.0, 0.0, ry/rx)
CALL gswkvp(wkid, 0.0, rx, 0.0, ry)
C
C Make sure Workstation Transformation is used
CALL guwk (wkid, gperfo)
C
C Set Window and Viewport for Normalization Tfrm 'trn'
CALL gswn (trn, xmn, xmx, ymn, ymx)
CALL gsvp (trn, 0.0, 1.0, 0.0, ry/rx)
C
C Select this Normalization Tfrm for future primitives
CALL gselnt(trn)
\end{XMP}
\begin{figure}[h]
\caption{Normalization and Workstation Transformations}
\label{fig:trafos}
\end{figure}
\subsection{\protect\label{sec:nrmclp}Clipping}
\index{clipping}
\index{GKS routine!{\protect\tt GSCLIP}}
Clipping is the exclusion of those primitives, or parts of primitives,
which lie outside a specified window or viewport. That is to say, if
clipping to a particular window is enabled, then only those parts of
primitives which lie within the boundaries of the window will be
displayed. Both the normalization transformation viewport (in NDC space)
and the workstation window can be used to clip a picture.
Clipping at the boundary of the normalization transformation viewport
(clipping rectangle) affects the image displayed on ALL workstations.
However, clipping at the boundary of the
workstation window only affects the image on the display surface
belonging to that particular workstation.
This is illustrated in Workstation Viewport 1 in \ref{fig:windows}.
The application can control whether clipping at the boundary of the
clipping rectangle is enabled or not by calling the routine GSCLIP(VAL).
This routine takes the value 'GNCLIP' (no clip) or 'GCLIP' (clip),
and the default is to clip.
Clipping at the workstation window may not be switched off.
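For example, to output primitives which may extend beyond the current
clipping rectangle, and then restore the default behaviour, one could use
the following sketch:
\begin{XMP}
C Switch viewport clipping off and back on again
      CALL GSCLIP(GNCLIP)
C ... primitives here may extend beyond the viewport ...
      CALL GSCLIP(GCLIP)
\end{XMP}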
\section{\protect\label{sec:seghdr}Segments}
\index{segments}
GKS provides a way to collect and store together the primitives that
make up all or part of a picture. Such a collection of primitives is
called a {\it segment}, has a unique name, and may be manipulated
as a unit in various ways.
Only one segment can be open at a time, and once a segment has
been closed further output primitives cannot be added to it,
nor can the primitives in it be modified.
\index{non-retained data}
Any {\it non-retained} data output whilst there is no open
segment will be lost if the screen is cleared for some reason.
There are several circumstances when this may be useful.
For example, a message written on the screen may have only limited
validity, or one may wish to output a very complicated image with
too much data to be stored, or which needs only to be displayed once.
However, much of the power of GKS comes from the ability to
interactively modify the image {\it without} always having
to regenerate the complete picture.
To make use of this feature, primitives must be stored in segments.
\subsection{Segment Storage}
\index{segment!storage}
\index{WDSS}
\index{WISS}
Every workstation in a GKS output level 1 or 2 implementation has
associated with it a {\it Workstation Dependent Segment Store}
(WDSS). At the time a segment is created, it is stored in the WDSS of all
open and activated workstations.
The WDSS may be physically located in the Graphics Terminal,
in which case there can be a substantial improvement in performance.
Of course, care must be taken not to exceed the available memory space.
In addition to WDSS, GKS output level 2 implementations
also have a {\it Workstation Independent Segment Store} (WISS).
WISS is a mechanism for storing segments in a
workstation-independent way, and allows segments to be re-used
and manipulated on different workstations at different times.
WISS is treated like any other workstation, just like a terminal
for example, and if it is open and activated when a segment is created,
then that segment will be stored there
as well as on the WDSSs of the other active workstations.
For example, one might store an object in WISS at the same time
as displaying it on a terminal. Then, at a later time, it would
be possible to make a hardcopy of the object by extracting the segment(s)
in which it was stored from the WISS and copying it (them) to a plotter.
A transformation or segment deletion affects the segment wherever it is stored.
Before primitives are stored in WDSS or WISS they first undergo
the normalization transformation currently in force.
Also, when a segment is created, the current clipping rectangle and
clipping flags are stored in the segment.
However, clipping is not performed on the primitives when they are stored,
but only when the contents of the segment are output to a device.
\subsection{Segment Creation, Deletion, and Re-naming}
To use GKS segmentation, first a segment must be created by calling:
\index{GKS routine!{\protect\tt GCRSG}}
\begin{XMP}
CALL GCRSG(SGNA)
\end{XMP}
where SGNA is an integer segment name. A segment called SGNA will
be created on all active workstations, including WISS, and will remain
open and store all primitives and attributes which are output until one calls:
\index{GKS routine!{\protect\tt GCLSG}}
\begin{XMP}
CALL GCLSG
\end{XMP}
Only a single segment may be open at one time.
Segments may be renamed, or deleted on a particular workstation
or all workstations by the following calls:
\index{GKS routine!{\protect\tt GRENSG}}
\index{GKS routine!{\protect\tt GDSGWK}}
\index{GKS routine!{\protect\tt GDSG}}
\begin{XMP}
CALL GRENSG(SGNOLD, SGNNEW)
CALL GDSGWK(WKID, SGNA)
CALL GDSG(SGNA)
\end{XMP}
Once a segment has been deleted the same segment name may be re-used.
Note that the Clear Workstation function deletes all the segments
stored in WDSS on that workstation.
The following fragment of code shows the use of segments.
See also the example in Appendix on Page~\pageref{sec:ex3dv}.
\begin{XMP}
---------
---------
CALL gcrsg(1) ! Create segment 1
CALL objone ! Draw an object
CALL gclsg ! close segment
CALL gcrsg(2) ! Create segment 2
CALL objtwo ! Draw an object
CALL gclsg ! Close segment
---------
---------
---------
CALL guwk(wkid, gperfo) ! Update Workstation
---------
---------
\end{XMP}
\subsection{\protect\label{sec:segnam}Segment Names}
\index{segment!names}
As indicated above, the FORTRAN binding requires segment names to be
integers, and these may be restricted by an implementation to a particular
range of values (1-32763 for GKSGRAL). However, the fact that segment names
are specified by integers does not imply that an implementation has to
make available as many segments as there are names, and the maximum
number of available segments is also an implementation-dependent feature
which may be much smaller than the maximum legal name.
(Thus, one can not assume that an implementation stores name {\it N}
as the {\it N}'th element of an array.)
GKS does not provide inquiry functions to obtain either of these maximum
values, so one must consult the relevant documentation.
The GKS standard provides a function, GQSGUS, to inQuire the set of SeGment
names in USe. However, as it is difficult to work with variable length
sets in FORTRAN, the FORTRAN binding has modified this function to allow
only the inquiry of the name of one element of the set at a time:
\begin{XMP}
CALL GQSGUS(N, ERRIND, NSG, SGNA)
\end{XMP}
where
\begin{DLtt}{123456}
\item[N]is the N'th segment in the set (an input parameter)
\item[ERRIND]is an error number
\item[NSG]is the total number of segments in use
\item[SGNA]is the name of the N'th segment
\end{DLtt}
Thus, in FORTRAN, it is necessary to call GQSGUS in a loop from 1 to NSG
in order to obtain a list of all the segment names in use.
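The sketch below shows such a loop (it assumes that at least one segment is
in use):
\begin{XMP}
C List all segment names currently in use
      INTEGER N, ERRIND, NSG, SGNA
      CALL GQSGUS(1, ERRIND, NSG, SGNA)
      DO 10 N = 1, NSG
         CALL GQSGUS(N, ERRIND, NSG, SGNA)
         PRINT *, 'Segment in use: ', SGNA
   10 CONTINUE
\end{XMP}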
Note that the function:
\begin{XMP}
CALL GQSGWK(WKID, N, ERRIND, NSG, SGNA)
\end{XMP}
works in a similar way to GQSGUS but provides the list of segment names
associated with workstation WKID.
\subsection{\protect\label{sec:segasi}Segment Association, Copying, and Insertion}
\index{segment!association}
\index{segment!copying}
\index{segment!insertion}
To view the contents of segments stored in WISS
it is necessary to transfer them to a real device.
This can be accomplished in three ways:
\begin{DLtt}{123456}
\item[Association]
Associate SeGment to WorKstation retrieves a segment stored in WISS
and sends it to the designated workstation as if this workstation had been
activated when the segment was first created.
It is thus stored in the WDSS of the designated workstation.
Nothing may be added to the segment.
There must be no open segment when this function is called.
\index{GKS routine!{\protect\tt GASGWK}}
\begin{XMP}
CALL GASGWK(WKID, SGNA)
\end{XMP}
\item[Copying]
Copy SeGment to WorKstation transforms the {\it contents}
of the designated WISS segment using the segment transformation
(described below), and then sends the result to the workstation specified.
The clipping volume of the copied segment is also sent to the
workstation. Note that the copied segment itself is not transferred;
its contents are just drawn on the output device {\bf without being
stored in WDSS}.
Thus, there must be no open segment when this function is called.
\index{GKS routine!{\protect\tt GCSGWK}}
\begin{XMP}
CALL GCSGWK(WKID, SGNA)
\end{XMP}
\item[Insertion]
INsert SeGment transforms the {\it contents} of the designated WISS
segment using both the transformation stored in the segment header
{\bf and} then the matrix provided in the call (MTX),
before inserting the results into the output stream as if
they had come directly from the application.
Thus, the output of Insert Segment goes to all activated workstations,
including the WISS, and will be stored in a segment if one is open.
All clipping rectangles and indicators, etc. are ignored.
The transformation matrix, MTX, may be produced using the routine
GEVTM as described below.
\index{GKS routine!{\protect\tt GINSG}}
\begin{XMP}
CALL GINSG(SGNA, MTX)
\end{XMP}
\end{DLtt}
The flow of graphics data between WISS, WDSS and the application
is shown in \ref{fig:dataflo}.
\begin{figure}[h]
\caption{Data Flow for GKS}
\label{fig:dataflo}
\end{figure}
\subsection{Segment Attributes}
\index{segment!attributes}
The appearance of segments depends on the following segment attributes:
\begin{UL}
\item segment transformation
\item visibility
\item highlighting
\item segment priority
\item detectability
\index{transformation!segment}
\end{UL}
Segment attributes may be modified after the segment has been closed,
and the changes will become visible on all active workstations
on which the segment has been stored. However, the timing of when these
changes take place is workstation-dependent, and may also be
affected by the deferral mode which is in force
(see sections on Page~\pageref{sec:defsta} and on Page~\pageref{sec:grsgwk}).
This is because some workstations may require picture regeneration
to produce the new attribute state on the display.
\subsubsection{\protect\label{sec:segtfm}Segment Transformations}
\index{segment!transformation}
A {\it segment transformation} is a transformation of all the
coordinates within a segment and is performed by a 2~x~3
matrix stored in the segment header. It maps from NDC to NDC. As an
example of the use of segment transformations, consider a circuit
design application which has symbols for transistors, capacitors,
resistors, etc. Such an application would store each symbol in a
separate segment, and then call INsert SeGment, specifying a
transformation matrix in order to duplicate a particular symbol at the
positions and orientations required.
When a segment is created GKS sets a default null transformation
which leaves the original coordinates unchanged.
Before setting a transformation it is necessary
to evaluate the transformation matrix by either using one's own
algorithm, or by using the routines:
\index{GKS routine!{\protect\tt GEVTM}}
\index{GKS routine!{\protect\tt GACTM}}
\begin{XMP}
CALL GEVTM(X0, Y0, DX, DY, PHI, FX, FY, SW, MXOUT)
and/or
CALL GACTM(MXIN, X0, Y0, DX, DY, PHI, FX, FY, SW, MXOUT)
\end{XMP}
GEVTM evaluates a matrix (MXOUT), whilst GACTM accumulates changes to
an existing matrix (MXIN).
Both routines require the definition of:
\begin{DLtt}{123456}
\item[X0, Y0]
(real) a fixed reference point about which 2D rotations take place.
\item[DX, DY]
(real) a translation (or shift) vector.
\item[PHI]
(real) an angle of rotation about X0, Y0.
\item[FX, FY]
(real) X and Y scale factors.
\item[SW]
(enumerated) a switch specifying whether the reference point and
shift vector are given in World Coordinates
or Normalized Device Coordinates ('GWC' or 'GNDC').
\end{DLtt}
The transformation is composed in the order: scale, rotate, shift.
In the case of GACTM, the matrix MXIN is pre-concatenated with that
formed from the scale, rotate, and shift parameters, so
MXOUT~=~SHIFT~*~ROTATE~*~SCALE~*~MXIN.
Once the transformation matrix has been evaluated, it may then be
Set in the SeGmenT by calling the routine:
\index{GKS routine!{\protect\tt GSSGT}}
\begin{XMP}
CALL GSSGT(SGNA, MTX)
\end{XMP}
An example of using a segment transformation may be
found in Appendix on Page~\pageref{sec:ex3dv}.
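As a small sketch (assuming, as in the GKS FORTRAN binding, that the rotation
angle is given in radians), the following rotates segment 1 by 45 degrees
about the NDC point (0.5,0.5) without scaling or shifting it:
\begin{XMP}
C Rotate segment 1 by 45 degrees about (0.5,0.5) in NDC
      REAL MTX(2,3)
      CALL GEVTM(0.5, 0.5, 0.0, 0.0, 0.7854, 1.0, 1.0, GNDC, MTX)
      CALL GSSGT(1, MTX)
\end{XMP}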
\subsubsection{Visibility}
\index{segment!visibility}
Segment {\it VISibility} (GSVIS) determines whether or not the
segment is displayed; the default is for the segment to be visible.
The values are 'GINVIS' or 'GVISI'.
As an example of its use, messages or icons could be created in segments
which would normally be invisible, but which could be made visible
at the appropriate time by the application program. The call is:
\index{GKS routine!{\protect\tt GSVIS}}
\begin{XMP}
CALL GSVIS(SGNA, GVISI)
\end{XMP}
Note that if a segment is invisible it is not detectable
(cannot be picked), even if detectability is enabled.
Also, even if a segment has visibility enabled, it may not actually be
drawn if the deferral state is set to something other than 'GASAP'
(see section on Page~\pageref{sec:defsta}).
\subsubsection{Highlighting}
\index{segment!highlighting}
Many display systems have some means for {\it highlighting}
graphical output, e.g. by making it brighter, or by causing it to blink.
The implementation of this attribute is dependent on the device
being used. Note that in order for a Segment to be HighLIghTed (GSHLIT),
it must first be made visible. The default is not highlighted.
The possible values are 'GNORML' or 'GHILIT'. The call is:
\index{GKS routine!{\protect\tt GSHLIT}}
\begin{XMP}
CALL GSHLIT(SGNA, GHILIT)
\end{XMP}
\subsubsection{Priority}
\index{segment!priority}
SeGment {\it Priority}, a real number in the range 0.0 to 1.0,
enables the control of the order in which segments are re-drawn when
the picture has been changed.
It also controls the order in which segments are picked.
If parts of segments overlap, then the segment with the highest
priority will be picked first and displayed on top of all the others.
If two segments with the same priority occupy the same area of the display
surface, then the last one will be drawn on top.
The default priority is 0.0. The call is:
\index{GKS routine!{\protect\tt GSSGP}}
\begin{XMP}
CALL GSSGP(SGNA, PRIOR)
\end{XMP}
The number of segment priority levels available (NSGP) for a particular
workstation, WTYPE, may be inquired using:
\index{GKS routine!{\protect\tt GQSGP}}
\begin{XMP}
CALL GQSGP(WTYPE, ERRIND, NSGP)
\end{XMP}
\subsubsection{Detectability}
\index{segment!detectability}
Segment {\it DeTECtability} controls which segments can be
picked by the operator using a Logical Pick Device, and which ones cannot.
Values are 'GUNDET' or 'GDETEC'.
If a segment is detectable, it can be picked.
The default setting is un-detectable. The call is:
\index{GKS routine!{\protect\tt GSDTEC}}
\begin{XMP}
CALL GSDTEC(SGNA, GDETEC)
\end{XMP}
Note that if a segment is invisible it is not detectable
(cannot be picked), even if detectability is enabled.
\subsection{The Pick Identifier}
\index{pick input}
\index{pick identifier}
It is possible to attach an integer {\it Pick Identifier} to primitives
which are stored in segments using the routine:
\index{GKS routine!{\protect\tt GSPKID}}
\begin{XMP}
CALL GSPKID(PCID)
\end{XMP}
This inserts one or more Pick Identifiers (PCIDs) into a
segment, and these are associated with the subsequent primitives.
If one of these primitives is picked sometime later using a Logical Pick
input device (see on Page~\pageref{sec:inphdr}), then one of the
pieces of information returned to the application is its PCID.
As the application is free to choose the value of the Pick Identifier,
this provides a second level of naming,
and provides a mechanism to refer back to an application database.
For example, if the identifiers 1 to 4 were attached to the four wheels
of a car, then picking wheel number three would return the identifier
'3' to the application program.
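By way of illustration, the car-wheel example might be coded along the
following lines. The segment number and the coordinate arrays WHLX/WHLY
are, of course, only assumptions made for this sketch.
\begin{XMP}
C     Four wheels in one segment, each tagged with its own
C     Pick Identifier (1 to 4). WHLX/WHLY(NPTS,4) are assumed
C     to hold the wheel outlines in World Coordinates.
      CALL GCRSG(5)
      DO 10 I = 1, 4
         CALL GSPKID(I)
         CALL GFA(NPTS, WHLX(1,I), WHLY(1,I))
   10 CONTINUE
      CALL GCLSG
\end{XMP}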
\subsection{\protect\label{sec:grsgwk}Segment Re-drawing}
\index{segment!re-drawing}
It is possible to force all segments within the WDSS on a particular
workstation to be re-drawn by calling the routine
Re-draw SeGments on WorKstation:
\index{GKS routine!{\protect\tt GRSGWK}}
\begin{XMP}
CALL GRSGWK(WKID)
\end{XMP}
\index{non-retained data}
The routine clears the screen, performs all deferred actions,
and re-draws all segments. All non-retained data is lost.
Possible reasons to re-draw all segments are:
\begin{OL}
\item if a segment had been over-written or deleted and it is
desired to regenerate a clean image;
\item if one wishes to remove all non-retained data,
system messages, etc;
\item if, on this workstation, image regeneration is required in order to
display the effect of modifying a segment attribute, and implicit
regeneration is switched off.
\end{OL}
The action of this routine differs from that of
Update WorKstation (GUWK), which may or may not re-draw segments
which have not been changed.
\section{Graphical Input}
\subsection{\protect\label{sec:inphdr}Logical Input Devices}
\index{input}
\index{logical input devices}
\index{input!classes}
\index{input!device numbers}
GKS organizes data that can be input to an applications program
into six types, each related to a {\it Logical Input Device}.
The actual physical input devices are mapped onto these logical
devices, which makes it possible for GKS to organize the different
forms of data in a device-independent way, and thus helps to make the
code more portable.
A logical input device is identified by 3 items:
\begin{OL}
\item a workstation identifier
\item an input class
\item a device number
\end{OL}
The six input classes and the logical input values they provide are:
\begin{DLtt}{123456}
\item[LOCATOR]
\index{locator input}
\index{transformation!locator input}
\index{viewport}
Returns a position (an x,y value) in World Coordinates
and a Normalization Transformation number corresponding to that
used to map back from Normalized Device Coordinates to World Coordinates.
The NT used corresponds to that viewport with the highest
{\it Viewport Input Priority} (set by calling GSVPIP).
{\bf Warning:} {\it If there is no viewport input priority set then
NT 0 is used as default, in which case the coordinates are
returned in NDC.} This may not be what is expected!
\index{GKS routine!{\protect\tt GSVPIP}}
\begin{XMP}
CALL GSVPIP(TNR, RTNR, RELPRI)
\end{XMP}
\begin{DLtt}{123456}
\item[TNR]Transformation Number
\item[RTNR]Reference Transformation Number
\item[RELPRI]
One of the values 'GHIGHR' or 'GLOWER' defined in the Include File,
ENUM.INC, which is listed in the Appendix on Page~\pageref{sec:hdenum}.
\end{DLtt}
\item[STROKE]
\index{stroke input}
Returns a sequence of (x,y) points in World Coordinates
and a Normalization Transformation as for the Locator.
\item[VALUATOR]
\index{valuator input}
Returns a real value, for example, to control some sort
of analogue device.
\item[CHOICE]
\index{choice input}
Returns a non-negative integer which represents a choice from a
selection of several possibilities. This could be implemented as a
menu, for example.
\item[STRING]
\index{string input}
Returns a string of characters from the keyboard.
\item[PICK]
\index{pick input}
Returns a segment name and a pick identifier of an object pointed
at by the user. Thus, the application does not have
to use the locator to return a position, and then try to find out
to which object the position corresponds.
\end{DLtt}
\subsection{Prompt/Echo Types and Triggers}
\index{prompt/echo type}
\index{trigger}
\index{input!prompt/echo type}
\index{input!trigger}
A {\it Prompt} and an {\it Echo} type are defined for
each logical input device.
For example, enabling the pick device might
prompt with a cursor of a particular shape which would track
the tablet or mouse to constitute an echo.
Acceptance of a {\it trigger} by the application,
hitting a key, for example, causes feedback
via an {\it acknowledgment} process.
Thus, the picked object could be made to blink.
Different prompt/echo types can be set when initializing the device.
They are implementation and workstation-dependent,
so see the relevant reference manual for details.
\subsection{Input Modes}
\index{input!modes}
\index{input!request}
\index{input!sample}
\index{input!event}
\index{request input}
\index{sample input}
\index{event input}
Logical input devices can be operated in three modes:
{\it Request}, {\it Sample}, and {\it Event}
('GREQU', 'GSAMPL', 'GEVENT').
In Request mode the application enables a device and then waits
for the user to trigger input, perhaps by pushing a key.
This is similar to issuing a FORTRAN {\it READ}.
In Sample mode the application can {\it measure}
the current value of the device, for example the locator position,
without waiting for a trigger. Finally, in Event mode, the application
can enable a set of several logical devices simultaneously.
Output measures from devices which have been triggered will be
placed in an {\it Event Queue}, from whence they can be
extracted by the application.
Note that in GKS input level 'b' only Request mode input is available,
and that it is not possible to request GKS input
and issue a FORTRAN I/O operation at the same time.
(However, although not required by the standard, the GKSGRAL level 'b'
implementation provides Sample input for some devices.)
\subsection{Request Input Functions}
\index{GKS routine!{\protect\tt GRQCH}}
\index{GKS routine!{\protect\tt GRQLC}}
\index{GKS routine!{\protect\tt GRQST}}
\index{GKS routine!{\protect\tt GRQSK}}
\index{GKS routine!{\protect\tt GRQVL}}
\index{GKS routine!{\protect\tt GRQPK}}
To request input one of the following calls needs to be made.
\begin{DLtt}{123456}
\item[Locator]CALL GRQLC(WKID, LCDNR, STAT, TNR, PX, PY)
\begin{DLtt}{123456}
\item[WKID](I) Workstation identifier
\item[LCDNR](I) The number of the logical locator device (usually 1).
\item[STAT](O) Error status (integer)
\item[TNR](O) The Normalization Transformation number used to
convert the input position to World Coordinates.
\item[PX,PY](O) The returned coordinates in WC.
\end{DLtt}
\item[String]CALL GRQST(WKID, STDNR, STAT, LOSTR, STR)
\begin{DLtt}{123456}
\item[WKID](I) Workstation identifier
\item[STDNR](I) The number of the logical string device (usually 1).
\item[STAT](O) Error status (integer)
\item[LOSTR](O) string length
\item[STR](O) The returned character string
\end{DLtt}
\item[Stroke]CALL GRQSK(WKID, STDNR, N, STAT, TNR, NPX, PX, PY)
\begin{DLtt}{123456}
\item[WKID](I) Workstation identifier
\item[STDNR](I) The number of the logical stroke device (usually 1).
\item[N](I) Maximum number of points (size of point arrays).
\item[STAT](O) Error status (integer)
\item[TNR](O) The Normalization Transformation number used to
convert the input position to World Coordinates.
\item[NPX](O) Number of points returned.
\item[PX,PY](O) The returned coordinate arrays (WC).
\end{DLtt}
\item[Valuator]CALL GRQVL(WKID, VLDNR, STAT, VAL)
\begin{DLtt}{123456}
\item[WKID](I) Workstation identifier
\item[VLDNR](I) The number of the logical valuator device (usually 1).
\item[STAT](O) Error status (integer)
\item[VAL](O) The returned value (real number).
\end{DLtt}
\item[Choice]CALL GRQCH(WKID, CHDNR, STAT, CHNR)
\begin{DLtt}{123456}
\item[WKID](I) Workstation identifier
\item[CHDNR](I) The number of the logical choice device (usually 1).
\item[STAT](O) Error status (integer)
\item[CHNR](O) The returned choice number (integer).
\end{DLtt}
\item[Pick]CALL GRQPK(WKID, PCDNR, STAT, SGNA, PCID)
\begin{DLtt}{123456}
\item[WKID](I) Workstation identifier.
\item[PCDNR](I) The number of the logical pick device (usually 1).
\item[STAT](O) Error status (integer)
\item[SGNA](O) Picked segment name.
\item[PCID](O) Pick identifier (integer set by GSPKID).
\end{DLtt}
\end{DLtt}
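As a simple illustration, a position could be requested from locator
device 1 and echoed with a marker as in the sketch below. The status
values GOK and GNONE are assumed to be available from the Include File
ENUM.INC; if the operator breaks the request, STAT is returned as
GNONE and the point should be ignored.
\begin{XMP}
      INTEGER WKID, STAT, TNR
      REAL    PX(1), PY(1)
C     Request a point from locator device 1 on workstation WKID
      CALL GRQLC(WKID, 1, STAT, TNR, PX(1), PY(1))
C     Echo the point with a polymarker if the request succeeded
      IF (STAT .EQ. GOK) CALL GPM(1, PX, PY)
\end{XMP}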
\subsection{Initializing Logical Input Devices}
\index{initializing logical input devices}
For each of the input classes there is an initialization function which
can change the values set as default by GKS for use in Request Mode
(and which should be called after the mode has been set).
The function calls are:
\index{GKS routine!{\protect\tt GINCH}}
\index{GKS routine!{\protect\tt GINLC}}
\index{GKS routine!{\protect\tt GINST}}
\index{GKS routine!{\protect\tt GINSK}}
\index{GKS routine!{\protect\tt GINVL}}
\index{GKS routine!{\protect\tt GINPK}}
\begin{DLtt}{123456}
\item[Initialize locator]CALL GINLC(...)
\item[Initialize string]CALL GINST(...)
\item[Initialize stroke]CALL GINSK(...)
\item[Initialize valuator]CALL GINVL(...)
\item[Initialize choice]CALL GINCH(...)
\item[Initialize pick]CALL GINPK(...)
\end{DLtt}
\index{echo area}
\index{normal!transformation}
\index{transformation!input initialization}
For all the input classes the echo area can be changed
(i.e. the portion of the display surface where the prompt appears and
which can accept input from the operator).
For Locator and Stroke input the initialization function sets the
initial normalization transformation and the initial locator position.
The initialization functions also select the required prompt/echo type.
A detailed description of the initialization functions is outside the scope
of this Primer, so those readers who wish to dig somewhat deeper should
consult the reference manual for the GKS implementation being used.
An example using GKSGRAL may be found in
Appendix on Page~\pageref{sec:iinput}.
\subsection{Input Inquiry Functions}
\index{input!inquiry functions}
There are two types of inquiry functions for GKS input. The first
is {\it Inquire Device State}, and the information is obtained by
calling GQCHS, GQLCS, etc.
The second is {\it Inquire Default Device Data} and the
information is obtained by GQDVL, GQDST etc. There is also a function
GQLI which can inquire the number of available logical input devices.
A detailed description of these functions is outside the scope of this
Primer, and so for more information the reader is referred to the
GKSGRAL manual or one of the texts in the bibliography.
\chapter{GKS Metafiles}
\index{metafiles}
GKS provides metafiles for the storage of graphical information.
Their principal uses are:
\begin{OL}
\item transporting graphical information between computer systems
\item transporting graphical information from one site to another
(by magnetic tape for example)
\item device spooling, e.g. for a plotter
\end{OL}
\index{Appendix E metafile}
There is no official ISO standard for writing a GKS metafile.
However, in Appendix E of the ISO GKS Functional Description
document a metafile format is described, and its use is recommended.
A GKS metafile created using this format is
known as an Appendix E metafile.
Unfortunately, not all implementations follow the Appendix E format,
and so metafiles created by different GKS packages may be incompatible.
In fact, even different examples of Appendix E metafiles may be
incompatible due to variations in the file record lengths, etc.
\section{Writing Metafiles}
A GKS metafile is produced by a standard GKS output-only workstation.
The workstation must first be 'opened' (GOPWK), then 'activated' (GACWK),
and all graphical information following these calls is recorded on the
metafile as a series of items (listed in an appendix of the
{\it GKS/GKS-3D Primer})
until a 'deactivate workstation' is encountered (GDAWK).
Hence the application must control it in the same way as a terminal
or a plotter. Clearly, some of the workstation inquiry functions,
such as Inquire Text Extent, cannot be used because this knowledge
depends on the device(s) ultimately chosen to interpret the metafile.
Thus, a GKS metafile does not record a complete 'picture' so much
as all the actions performed to make such a picture. If the application
deletes segments and then draws different ones, all this will be recorded
on the metafile if it is active.
This last point has a number of ramifications. Because the metafile
{\it captures} all the output generated whilst it is active,
it could be used to record a dynamic sequence, like a cartoon,
as long as it is re-interpreted onto a dynamic display system.
However, if the object of the exercise is to interpret the metafile onto
a hardcopy device, it is not clear what the interpreter would be expected
to do with, for example, a Delete Segment operation!
Thus, for this kind of use, the metafile workstation should only be
activated when a finished picture is ready to be output, perhaps by
copying the image from WISS. A classic mistake is to leave the metafile
workstation activated whilst one works interactively on a terminal,
and then to find that the program crashes when all the available disc space is used up.
\index{GKS routine!{\protect\tt GOPWK}}
\index{GKS routine!{\protect\tt GACWK}}
\index{GKS routine!{\protect\tt GDAWK}}
\index{GKS routine!{\protect\tt GCLWK}}
\index{conid}
\index{connection identifier}
To open an Appendix E metafile workstation the call is:
\begin{XMP}
CALL GOPWK(WKID, CONID, WTYPE)
\end{XMP}
where the parameter WTYPE specifies the metafile workstation which is
defined in the documentation for the GKS implementation in use
(see Appendix on Page~\pageref{sec:gtstyp} for the GKSGRAL values).
\index{VAX!metafile open}
\index{IBM!metafile open}
The metafile will be written to the logical unit number corresponding
to the connection identifier (conid) in the GOPWK call
(see section on Page~\pageref{sec:conref} for legal values).
On VM/CMS, a conid of XX with no OPEN or FILEDEF statements would result
in a metafile name 'FILE~FTXXF001'.
A convenient way to declare FILEDEFs from a FORTRAN program is to use
the CERN library routine VMCMS (code Z305).
However, Version 2.3 of the VM/CMS FORTRAN compiler provides the
possibility to specify the file name in the OPEN statement,
and this is now the recommended mechanism. A '/' is required
before the filename:
\begin{XMP}
OPEN(UNIT=CONID, FILE='/filename filetype filemode', STATUS='NEW')
\end{XMP}
On VAX/VMS the metafile can be written to a file which should be opened
prior to the call to GOPWK. If there is no OPEN statement, the metafile will
be written to FOR0XX.DAT, where XX is the specified conid, unless
FOR0XX is defined as a logical name.
Under VMS, the OPEN statement should read:
\begin{XMP}
OPEN(UNIT=CONID, FILE='filename.ext', STATUS='NEW')
\end{XMP}
On APOLLO the OPEN statement is mandatory with a format as above.
On the CRAY the OPEN statement is optional. If it is missing, then
the file will be given the name 'fort.N' where N is a number from 1 to
99 corresponding to the connection id. Note that unlike on VMS,
a value of N from 1 to 9 has no leading 0.
If a file name has not been defined via the open statement, then one can
be assigned using the command 'assign~-a~myname~fort.N'.
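Putting the pieces together, a minimal VAX/VMS sequence for producing a
metafile might look like the sketch below. The unit number, file name,
and the values of WKID and WTYPE are assumptions; WTYPE must be the
metafile workstation type defined by the implementation in use.
\begin{XMP}
C     Open the file, then the metafile workstation (conid = 20)
      OPEN(UNIT=20, FILE='GTS.MET', STATUS='NEW')
      CALL GOPWK(WKID, 20, WTYPE)
      CALL GACWK(WKID)
C     ... output the picture(s) to be recorded ...
      CALL GDAWK(WKID)
      CALL GCLWK(WKID)
\end{XMP}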
\section{Shipping Metafiles}
One of the fundamental uses of a metafile is that it can
be stored in a device independent manner and transferred to
other hosts and other sites where it can be subsequently interpreted.
Metafiles at CERN are normal text files written in 80 character records,
which makes transfers between host computers straightforward.
However, it should be mentioned that for transfers from VAX/VMS to VM/CMS
using NFT, the qualifier '/CR' is essential if the file has not been
opened using the parameter CARRIAGECONTROL='LIST'.
The REXX exec below gives an example of an appropriate metafile transfer
using interlink:
\begin{XMP}
/* Interlink metafile transfer */
'exec nft receive vxcern::disk$gg:[userid]gts.met gts metafile a/cr'
\end{XMP}
\section{GRVIEW and the Interpretation of Metafiles}
\index{metafile!interpreter}
\index{interpreting metafiles}
\index{GRVIEW}
\index{editor}
To use the graphical information stored in the metafile it must be
interpreted so that the original picture can be re-created on whatever
device the application requests.
GKS provides three functions in order to interpret metafiles,
and these may be called by applications which require to manipulate
the contents of a metafile explicitly:
\index{GKS routine!{\protect\tt GRDITM}}
\index{GKS routine!{\protect\tt GGTITM}}
\index{GKS routine!{\protect\tt GIITM}}
\begin{DLtt}{123456}
\item[GGTITM]get item from metafile
\item[GRDITM]read item from metafile
\item[GIITM]interpret item from metafile
\end{DLtt}
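For applications which drive the interpretation themselves, these three
functions are typically used in a loop of the following shape. This is
only a schematic sketch: the data record dimension is an assumption, and
the exact argument lists, as well as the item type used to mark the end
of the metafile, should be checked against the reference manual of the
GKS implementation in use.
\begin{XMP}
      PARAMETER (MXLEN = 100)
      CHARACTER*80 DATREC(MXLEN)
C     Loop over the items of the metafile open on workstation WKID
   10 CALL GGTITM(WKID, ITYPE, LDR)
      IF (ITYPE .EQ. 0) GOTO 20
      CALL GRDITM(WKID, LDR, MXLEN, DATREC)
      CALL GIITM(ITYPE, LDR, MXLEN, DATREC)
      GOTO 10
   20 CONTINUE
\end{XMP}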
In order to view a GKS metafile on a terminal a program is available
called {\bf GRVIEW}.
This is an interactive program which allows the user to view and/or edit
the pictures (frames) in one or more metafiles.
The pictures may be viewed sequentially,
or it is possible to skip frames to get to a particular picture.
If the frames have been named using the {\bf GCNAME} function,
then it is possible to search for pictures by name, and also to make
a directory list of all pictures on the metafile.
\index{PostScript}
After viewing a picture the user has the possibility to write it out
onto a GKS metafile, or in various other formats, including PostScript
and Tektronix 4014 escape codes.
This allows sub-sets of the pictures on the original file to be produced.
There is also a feature provided which allows the position and scale of the
Workstation Viewport to be re-set. Thus, the size of the output picture can
be changed to better suit a particular device.
Without being quite so sophisticated, GRVIEW attempts to provide the sort
of capabilities available on commercial products such as {\it MACDRAW},
except that GRVIEW runs on VAX, APOLLO, and IBM computers with any GKS
supported terminal. Having said this, it should be clear that the
'user friendliness' of an editor is much greater on a colour APOLLO
or VAX workstation, or on a Tektronix 4207, than on a machine with only
the capabilities of a Tektronix 4014.
GRVIEW operates in one of three modes, which are selected on the command line:
\begin{OL}
\item VIEWING mode is the default. In this case GRVIEW is used to allow the
pictures on the metafile to be displayed on a graphics terminal.
In this pure viewing mode GRVIEW does not make use of any internal picture
storage and so will run more efficiently.
\item COPY mode also allows the input metafile to be displayed, but in
addition frames may be selected for copying onto an output file.
This may be another metafile, a PostScript file, or a file containing
Tektronix 4014 escape sequences. In copy mode the contents of each picture
may not be changed, but each picture may be named (if it is not already),
scaled in size, and several pictures may be packed onto a single page.
\item EDIT mode allows the pictures on an input metafile to be edited,
or for a completely new picture to be generated from scratch.
\end{OL}
Depending on the system in use, GRVIEW allows parameters to be provided
on the command line, or via an interactive dialogue. The program also
prompts the user to provide any missing information.
The user now {\bf has} to define which terminal type is
being used because the diversity of incompatible terminals available does
not permit a useful default to be chosen.
There are also features which warn the user if too many GKS errors have
been produced (to avoid filling up the user's file space with messages),
and which allow VM/CMS users to regain control after a pre-set number of
graphics operations as VM has no simple interrupt facility.
\index{HELP}
\index{FIND}
More details of how GRVIEW operates may be found in reference
\cite{bib-grref}, or by typing {\bf HELP~GRVIEW} or
{\bf FIND~GRVIEW}.
\section{GRCONV and the Conversion of Metafiles}
\index{converting metafiles}
\index{GRCONV}
As described in section on Page~\pageref{sec:mhcpref}, metafiles may be used for
the production of hardcopy output via the command {\bf GRPLOT}.
However, there are output devices for which there may not be support within
GRPLOT, or it may be desired to combine pictures into documents existing
in a particular output format or Page Description Language.
For these and other reasons it is often useful to be able to transform
a metafile into a different format, and to accomplish this the utility
{\bf GRCONV} has been written. Note, however, that keeping the
original picture in the form of a metafile is the most flexible approach, as
this does not place any restrictions on its final use.
GRCONV converts the input metafile to a new format which is stored on one
or more output files. For example, GRCONV can produce as output
normal or Encapsulated PostScript files, files of Tektronix 4014%
\footnote{Will be installed on VM/CMS if there is a demand.}
escape sequences, or bit maps in IBM~3812 format.
The PostScript files may be printed at CERN using the {\bf XPRINT}
command, as well as being used as a mechanism for the transmission of pictures
for printing at external institutes.
Pictures encoded in Encapsulated PostScript format
(see section on Page~\pageref{sec:epsref})
also may be incorporated into documents produced by SGML, BookMaster,
or TeX text processing systems.
The Tektronix 4014 escape sequences may be used to drive some laser printers,
including the DEC LN03, which do not support PostScript.
GRCONV operates in one of three modes: {\bf Interactively}, in which it
prompts for input and allows a selection of pictures chosen by commands from
an alpha-numeric terminal to be converted; {\bf Batch}, in which all the
relevant parameters must be given on the command line and in which the whole
metafile is converted; and {\bf Remote-Batch} which is similar to the
last case but in which the command is executed on a remote machine.
Thus, whilst Interactive and Batch modes run locally, the
Remote-Batch mode allows the input metafile to be converted into a form
which may only be available on the remote host. For example, this feature
allows GRCONV to subsume the old GKSSGML by producing a set of IBM-3812
bitmap files on CERNVM.
\index{HELP}
\index{FIND}
More details of how GRCONV operates may be found in reference
\cite{bib-grref}, or by typing {\bf HELP~GRCONV} or
{\bf FIND~GRCONV}.
\chapter{\protect\label{sec:hcopy}Making Hardcopies}
\index{hardcopy}
It is possible to make hardcopy plots either interactively with GKS or via
a metafile. Some devices accept acetate sheets, which allow transparencies
to be produced for overhead projectors.
The GKS plotter or metafile workstations operate just like any other,
with an important exception. Namely, functions such as 'Delete Segment'
will not undraw what has been output on the paper!
Thus, when making a hardcopy, it is suggested that the picture to be plotted
is produced first interactively on a terminal screen and stored in
WISS (Workstation Independent Segment Storage). During this time
the plotter or metafile workstation should be deactivated.
When the picture is complete, and does not require further modification,
then the plotter or metafile workstation may be activated and the
WISS segments containing the picture sent to it by calling
Associate SeGment to WorKstation (GASGWK).
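Schematically, the recommended sequence is as follows. WKWISS and WKPLOT
are assumed to identify the WISS and the plotter (or metafile)
workstations, both already open, and segment 1 holds the finished picture.
\begin{XMP}
C     Build the picture in WISS whilst the plotter is deactivated
      CALL GACWK(WKWISS)
      CALL GCRSG(1)
C     ... output the picture into segment 1 ...
      CALL GCLSG
C     Picture finished: send it to the hardcopy workstation
      CALL GACWK(WKPLOT)
      CALL GASGWK(WKPLOT, 1)
      CALL GDAWK(WKPLOT)
\end{XMP}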
\section{Hardcopies via an Interactive Program}
Plotter output may be produced by an interactive program running on a
machine directly connected to a hardcopy device.
For example, GKSGRAL at CERN supports Hewlett Packard pen plotters,
VERSATEC electrostatic and thermal plotters, and any laser printer
\index{VERSATEC}
\index{PostScript}
driven via the PostScript interface. (See the latest version of the
include file {\it GTSDEV} for a complete up-to-date
list of supported devices.)
In the case of VERSATEC or PostScript,
an intermediate plot file is generally produced
which must be queued to the device.
However, use of an HP plotter normally assumes that the device is connected
directly in parallel with the terminal line, and it is activated by special
escape sequences sent by the driver, but it is also possible to produce
an HP plotter file by specifying a connection identifier of
conid~=~(100~+~n), where n is a small integer greater
than 1. This causes the output to be written to FORTRAN Logical Unit number n,
and the file has the name PLxxxx.PLT, where 'xxxx' indicates the
workstation type. Some laser printers do not support
PostScript but do, in fact, support the Tektronix 4014 protocol.
In this case it is possible to capture the 4014 escape codes
on a file which is later sent to the printer. In any case,
to capture the graphics on a file, first open a file on unit n,
and then open the corresponding GKS
workstation with a conid of (100~+~n).
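For example, to capture the output on FORTRAN unit 15 one might write the
following (the file name and the values of WKID and WTYPE are illustrative):
\begin{XMP}
C     conid = 100 + 15 sends the output to unit 15
      OPEN(UNIT=15, FILE='PICTURE.DAT', STATUS='NEW')
      CALL GOPWK(WKID, 115, WTYPE)
\end{XMP}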
Note that use of a hardcopy device via an interactive program has the
advantage that the application can inquire the specific device
characteristics, and thus tailor the output accordingly.
\section{\protect\label{sec:mhcpref}Hardcopies via a Metafile}
\index{GRPLOT}
\index{GKSVT}
\index{GKSCP}
\index{GKSX87}
\index{GKS3812}
\index{APA 6670}
\index{VERSATEC}
\index{XEROX}
\index{PostScript}
\index{IBM!metafile hardcopy}
\index{IBM!3812}
Using a metafile it is possible to make hardcopy plots on devices connected
to the central computing facilities. The devices currently supported
include:
\Lit{$==>$} To be updated ...
\begin{UL}
\item VERSATEC Model CE 3236E (colour with 36 inch roll paper, 400 dpi)
\item VERSATEC Versacolor Model VE2700 (white and transparent A4 cut sheet)
\item XEROX Model 4050 Laser Printer (A4 cut sheet)
\item IBM 3812 Laser Printers (A4 cut sheet)
\item PostScript printers, such as Apple Laser Writers, etc.
\end{UL}
It is not necessary to be logged on to one of the computer centre machines
to use these services. The command {\bf GRPLOT} may be used to output
metafiles on any of the central plotting devices from all centrally supported
machines at CERN connected via either the DECNET or TCP/IP protocols,
as the command first transfers the metafile over the network
to the correct destination.
The GRPLOT command has replaced the previous
collection of GKSVT, GKSCP, GKSX87, and GKS3812;
rather than having a separate command for each plotter, the output device is
provided as a parameter. The mechanism for naming output devices is the
same as that for the latest version of XPRINT.
\index{HELP}
\index{FIND}
More details of how GRPLOT operates may be found in reference
\cite{bib-grref}, or by typing {\bf HELP~GRPLOT} or
{\bf FIND~GRPLOT}.
\begin{note}
The PostScript and VERSACOLOR colour plotters produce output on
A4 cut sheets. However, the paper and ink donor rolls are expensive.
Thus, users are asked to use the device only for the final
version of their plots, and not whilst debugging programs.
Plots cannot be released automatically, and users have to release them by
hand using a terminal next to the device
which is installed in the user area on the ground floor of
building 513.
\end{note}
\section{\protect\label{sec:sgmlgra}Mixed Text and Graphics}
\Lit{$==>$} To be updated ...
\subsection{Via SGML/BookMaster}
\index{SGML}
\index{PostScript}
\index{BookMaster}
\index{GRCONV}
A version of the document markup language SGML layered on top of the
IBM BookMaster product \cite{CERNSGM1} is implemented on the central
VM/CMS service and supports the inclusion of pictures into compound
documents.
The syntax of SGML/BookMaster is not identical to that of the original
Waterloo/Script based SGML implementation available at CERN, but the procedure
is similar. Namely,
\begin{OL}
\item Use a graphics application program, including calls to GCNAME,
to produce a metafile.
\item Run GRCONV to produce the set of picture files.
\item Mark-up a document with SGML/BookMaster and associate the picture files
with particular figures within the document.
\item Run SGML/BookMaster on the document.
\item Edit the document as necessary and repeat previous step until
result is satisfactory.
\end{OL}
Note that it is not necessary to repeat the first two steps every time
SGML/BookMaster is used to re-process the document as long as the picture
files are stored on the user's mini-disk.
The GRCONV command can be used to generate the picture files,
but as it is possible to print the resulting document on either IBM-3812
compatible or PostScript printers the user must take care to select
the correct output format.
As it would be cumbersome to require every picture to be stored in
its own metafile and processed separately, GRCONV can handle metafiles
which contain several pictures (separated by 'CLEAR WORKSTATION' items),
and be instructed to produce a separate output file for each picture on the
metafile.
If the user is not working under VM/CMS%
\footnote{Care must be taken to ensure that there is no conflict
in having GRCONV write to a VM/CMS mini-disk which is accessed in
write mode by another process. The user should have a write password
on a mini-disk available for access by GRCONV in write-mode},
GRCONV first transfers the metafile to the user's VM account, and then
interprets it to produce the required set of output files.
Note that GRCONV replaces the command GKSSGML, and has a wider range of
features.
A complete job to print a PostScript picture is given below.
PostScript pictures can be scaled at will, but if a {\bf BoundingBox}
command is present in the Encapsulated PostScript file,
the WIDTH parameter of the {\bf ARTWORK} tag is redundant,
and the size specified by the BoundingBox or DEPTH inside the file will
be used. In order to avoid conflicts in the X and Y scaling,
users are advised not to specify both DEPTH {\it and} WIDTH.
\begin{XMP}
<!DOCTYPE USERDOC SYSTEM "USERDOC DTD *" [
<!-- Declare the Encapsulated PostScript file -->
<!ENTITY FIGURE1 SYSTEM "MYFIG1$S EPS" CDATA EPS>
]>
<USERDOC>
<PROLOG>
<DOCPROF>
</PROLOG>
<BODY>
<... some text
<ARTWORK NAME=FIGURE1 DEPTH=17CM ALIGN=CENTER>
\end{XMP}
The procedure for output onto an IBM-3812 compatible printer
(IBM-3812 or IBM-3816) is similar, but in this case
the graphics image is stored not in an Encapsulated PostScript
file but in a PSEG3820 image file which cannot be scaled.
\begin{XMP}
<!DOCTYPE USERDOC SYSTEM "USERDOC DTD *" [
<!-- Declare the PSEG file -->
<!ENTITY FIGURE1 SYSTEM "MYFIG1$S PSEG3820 *" NDATA PSEG>
]>
<USERDOC>
<PROLOG>
<DOCPROF>
</PROLOG>
<BODY>
\chapter{Title text}
<ARTWORK NAME=FIGURE1>
\end{XMP}
Note that the figure name {\it FIGURE1} specified by the {\bf NAME}
attribute of the {\bf ARTWORK} tag is defined at the beginning of the
file via the {\bf ENTITY} declaration. Thus, the actual file name
on the user's disk and the name of the figure in the text are decoupled;
one can change the figure used by changing the ENTITY declaration
and without modifying the body of the text.
There should be one picture file and one ENTITY declaration for each figure.
SGML/BookMaster does not place restrictions on the file names used,
but the original CERN implementation of SGML did. Thus, for backwards
compatibility, GRCONV still generates file names conforming
to the old convention: the file name must contain
eight (8) characters terminating in an 'S'.
As described in section on Page~\pageref{sec:gcnref},
if the name supplied by the application
which calls GCNAME provides less than 7 characters, then GCNAME pads the name
out with \Lit{'$'}s. Hence \Lit{'MYFIG1'} becomes \Lit{'MYFIG1$S'}.
If GCNAME was not used by the application to write names for
each figure onto the metafile then GRCONV will generate names
for each picture file automatically. The algorithm used is described
in \cite{bib-grref}.
\subsection{Via Waterloo/Script SGML}
\Lit{$==>$} KEEP it ???
\index{SGML}
\index{GGRCONV}
The original version of the document markup language SGML \cite{bib-sgmlref}
implemented on the central VM/CMS service supports the inclusion of pictures.
However, note that this SGML implementation is being replaced
by a new one layered on top of the IBM BookMaster product (see above).
For each picture to be included when using the IBM-3812
output devices SGML requires two files;
one to define the amount of space to be reserved within the document,
and one containing the actual bit-map. These files must be accessible to
SGML on one of the user's mini-disks.
As described in \cite{bib-sgmlref}, the files may originate from several
sources. This section will discuss how to use the {\bf GRCONV} command to
produce them from a GKS metafile and store them on the specified mini-disk.
The sequence of operations would be:
\begin{OL}
\item Use a graphics application program, including calls to GCNAME,
to produce a metafile.
\item Run GRCONV to produce the set of picture files and store them on
a user disk.
\item Mark-up a document with SGML and associate the picture files
with particular figures within the document.
\item Run SGML on the document.
\item Edit the document as necessary and repeat previous step until
result is satisfactory.
\end{OL}
Note that it is not necessary to repeat the first two steps every time
SGML is used to re-process the document so long as the picture files
are stored on the user's disk.
An example of the SGML syntax necessary to include a figure corresponding
to a pair of picture files would be:
\begin{XMP}
<FIG>
<PICTURE NAME=XXXXXXXS>
<FIGCAP>This is the Figure Caption
</FIG>
\end{XMP}
When SGML encounters the tag 'PICTURE' it reads a file
specified by the 'NAME' attribute.
The name {\bf must} consist of 8 characters ending with an 'S'.
The file type under VM/CMS is 'SGML'. (The name of the corresponding
bit-map file has the same first 7 characters, but the eighth is 'P'.)
The GRCONV command can be used to generate these two files.
In fact, as it would be cumbersome to require every picture to be stored in
its own metafile and processed separately,
GRCONV can handle metafiles which contain several pictures (separated by
'CLEAR WORKSTATION' items). It then produces one pair of output files
for each picture on the metafile.
If the user is not working under VM/CMS, GRCONV first transfers the metafile to
the user's VM account, and then interprets it to produce the required
set of output files. (Care must be taken to ensure that there is no conflict
in having GRCONV write to a VM/CMS mini-disk which is accessed in
write mode by another process. The user should have a write password
on a mini-disk available for access by GRCONV in write-mode.)
Note that GRCONV replaces the command GKSSGML,
and has a wider range of features.
\subsection{Via \LaTeX}
\index{latex@\LaTeX}
As the mechanism used to incorporate pictures into TeX documents depends
not on TeX itself, but on the printer driver used by TeX, no single
mechanism exists to achieve this. Thus, unfortunately, the procedure
varies and is not described here. For details, see \cite{bib-TAC}.
Assuming the hardcopy will be onto a device which supports PostScript
or an IBM-3812, then GRCONV should be used to produce an Encapsulated PostScript
or PSEG38PP/PSEG3820 file as for the SGML/BookMaster case above.
\subsection{\protect\label{sec:gcnref}Naming Pictures}
\Lit{$==>$} Keep ???
\index{GCNAME}
\index{GKS routine!{\protect\tt GCNAME}}
In order to include pictures within a document, it is clearly necessary to
have some way of designating which picture goes where.
Thus, the pictures require naming in some way.
To accomplish this, a routine called GCNAME has been added to the
GKSGRAL and GKSGRAL-3d libraries.
(It is available also for DECGKS, and in source format in GKSPACK.)
GCNAME outputs a user item containing a name string onto the metafile
and, for example, this is used by GRCONV to generate the file names,
and by GRVIEW to allow the user to choose by name the picture to be edited.
GCNAME should be called to name a picture immediately after the previous
picture delimiter (Clear Workstation) and before any primitives or attributes
for the named frame have been written out. The call is:
\begin{XMP}
CALL GCNAME(WKID, NAME)
\end{XMP}
where WKID is the metafile workstation identifier,
and NAME is a FORTRAN variable or constant of type CHARACTER
and with a length of {\bf seven (7)} characters.
The seven characters must correspond to the 'XXXXXXX' preceding the 'S'
used in the picture NAME tag used by SGML.
If less than 7 characters are used then GRCONV
will pad the name out with dollar signs (\Lit{'$'}).
Thus, if a picture was named 'FPROJ' by GCNAME, then the corresponding SGML
NAME tag would need to be \Lit{'FPROJ$$S'}.
This convention is maintained for backwards compatibility, although
it is not required by SGML/BookMaster.
Even though not required for other operating systems, in order
to remain compatible with the file naming scheme used by VM/CMS,
GCNAME restricts the allowed character set to upper-case alphabetic characters,
the digits 0-9, and the dollar sign (\Lit{'$'}).
Lower-case characters are automatically converted to upper-case.
This is also why the number of characters is limited.
In order to avoid getting spurious or empty picture files produced by
GRCONV, application code should issue the Clear Workstation call to
the metafile workstation only if a complete picture really has been written to
the file.
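A sketch of the intended usage is given below. The picture names are
illustrative, and GALWAY is assumed to be the 'always' clear control flag
from the Include File ENUM.INC.
\begin{XMP}
C     Name each picture before its primitives are output, and
C     delimit the pictures with Clear Workstation.
      CALL GCNAME(WKID, 'FPROJ')
C     ... primitives and attributes of the first picture ...
      CALL GCLRWK(WKID, GALWAY)
      CALL GCNAME(WKID, 'FELEV')
C     ... primitives and attributes of the second picture ...
      CALL GCLRWK(WKID, GALWAY)
\end{XMP}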
\subsection{\protect\label{sec:epsref}Encapsulated PostScript}
\index{Encapsulated PostScript}
\index{PostScript}
It is possible to use the GKS PostScript driver in order to produce output
files in the format {\bf Encapsulated Postscript}, which is used by
various text-processors (TeX, SGML, BookMaster, etc.) in order to combine
PostScript-encoded data originating from different sources.
Encapsulated Postscript format is similar to standard PostScript but
with some additional header information concerning the picture size.
The PostScript instruction "showpage", which is normally included
at the end of the file in order to tell the interpreter to print the
preceding information, is removed in the Encapsulated format.
This avoids any conflict with the contents of other PostScript files
into which the Encapsulated file may be embedded.
Thus, sending an Encapsulated PostScript file to a printer directly will
not produce any output.
It is possible to use GRCONV to convert a GKS metafile to Encapsulated
PostScript format by specifying the requisite qualifier ('EPSPM', ...).
If the input metafile contains several pictures it is also possible to
use the 'SPLIT' option in order to get one output file per picture.
If a PostScript file is to be produced directly by a GKS application
program then the following applies:
\begin{OL}
\item
Open a GKS PostScript Workstation. The Workstation Type is the same
for both normal and Encapsulated PostScript formats.
\item
As for all drivers, if the connection identifier is less than 100
(see on Page~\pageref{sec:conref}) then the PostScript instructions
are sent directly to the terminal.
(Useful if the terminal supports Display PostScript).
\item
If the connection identifier is greater than 100 but less than 200
(conid~=~100~+~n), then PostScript instructions will be
written to a file in normal PostScript format opened on unit number 'n'.
The file may include several pictures.
\item
If the connection identifier is greater than 200
(conid~=~200~+~n), then PostScript instructions will be
written to a file in Encapsulated PostScript format opened on unit number 'n'.
Only one picture should be included per file.
\end{OL}
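For instance, to write a single picture in Encapsulated PostScript format
to FORTRAN unit 21, the following sketch could be used. The file name is
illustrative, and WTPS stands for the PostScript workstation type of the
implementation in use.
\begin{XMP}
C     conid = 200 + 21 selects Encapsulated PostScript on unit 21
      OPEN(UNIT=21, FILE='MYFIG1.EPS', STATUS='NEW')
      CALL GOPWK(WKID, 221, WTPS)
      CALL GACWK(WKID)
C     ... output one complete picture only ...
      CALL GDAWK(WKID)
      CALL GCLWK(WKID)
\end{XMP}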
\section{Use of Workstation Transformations to Change Picture Size}
\index{workstation transformation}
\index{picture size}
\index{size}
When using GKS from an application program it is not necessary to
specify a workstation transformation to set the size of the output picture.
In this case, the Normalization Viewport will automatically be made to map to
the largest possible square which fits onto the output device display surface.
This is often acceptable for an interactive graphics session.
However, when making a hardcopy image, especially if this
must fit within a document, then the actual size of the picture may
well be very important.
The GKS Workstation Transformation, which is described fully in
on Page~\pageref{sec:gkspr}, allows the actual area required
to be specified in metres.
A brief summary will be given here.
The {\it Workstation Transformation} defines which part of the
Normalized Device Coordinate (NDC) space will
be visible, and where it will appear on the display surface.
It is set by calling the functions Set WorKstation WiNdow and
Set WorKstation ViewPort:
\begin{XMP}
\index{GKS routine!{\protect\tt GSWKWN}}
\index{GKS routine!{\protect\tt GSWKVP}}
CALL GSWKWN(WKID, WXMIN, WXMAX, WYMIN, WYMAX)
CALL GSWKVP(WKID, VXMIN, VXMAX, VYMIN, VYMAX)
\end{XMP}
Set Workstation Window specifies in NDC coordinates the area of the NDC
space to be output to the device. Set Workstation Viewport specifies in
{\it Display Coordinates} (DC) where on the device the window will appear.
The aspect ratio for the workstation window and the workstation viewport
{\bf must always be the same}.
If they are not, then {\bf the specified transformation is ignored},
and the complete workstation window is displayed on the device
in the correct aspect ratio.
As an example, suppose that the picture occupies the area (wxmin,~wymin)
to (wxmax,~wymax) in World Coordinates. This may be mapped via the
Normalization Transformation to a Viewport (vxmin,~vymin) to
(vxmax,~vymax), and this does not have to preserve the aspect ratio.
This Viewport must now be output onto an A4 sheet so as to fill the width
of the page, assuming an aspect ratio with width~>~height.
\begin{note}
Although the width of an A4 page is 21cm, most output devices do not
permit the full width to be used. Consult the Workstation Description Table
for the device to find out what is the maximum size.
\end{note}
\begin{XMP}
C Set the Window and Viewport for Normalization Tfrm 'trn'
C and select this Normalization Tfrm for future primitives.
C
CALL gswn (trn, wxmin, wxmax, wymin, wymax)
CALL gsvp (trn, vxmin, vxmax, vymin, vymax)
CALL gselnt(trn)
C
C Set the Workstation Window and Workstation Viewport
C Note that 18.9 cm is the maximum width for PostScript Portrait Mode
C (The Workstation Window to map from the Normalization Viewport)
C
CALL gswkwn(wkid, vxmin, vxmax, vymin, vymax)
CALL gswkvp(wkid, 0.0, 0.189,
* 0.0, 0.189*(vymax-vymin)/(vxmax-vxmin))
\end{XMP}
\chapter{GKS-3D Primer}
\index{GKS-3D}
\begin{note}
Whilst in general the information in this Primer is independent of
a particular GKS implementation, this is not entirely the case for
GKS-3D. The reason is that the goal of the Primer is not simply
to describe the functionality of GKS, but to explain in detail
how it may be used via the FORTRAN language binding.
However, as the binding has not yet been completely finalised for
GKS-3D, this chapter explains its use in terms of a
particular implementation, namely GKSGRAL-3D Version 2.0.
The text of the Draft International Standard for the GKS-3D
FORTRAN binding was published at the end of 1988,
with voting to finish by May, 1989.
Discrepancies between the FORTRAN DIS and GKSGRAL-3D Version 2.0 are minor,
and are mentioned at the appropriate places within the chapter.
Only the setting of the viewing parameters and the aspect source
flags are affected, and the text will be revised after the final
voting on the standard is complete.
The changes between VSN 1.1 and VSN 2.0 of GKSGRAL-3D
are described in Appendix on Page~\pageref{sec:vsn2upd}.
\end{note}
\section{Introduction to GKS-3D}
GKS-3D is a pure super-set of GKS designed to handle 3D graphics in
a compatible way. That is to say, a 2D application written to
the GKS standard is guaranteed to run in a GKS-3D environment
without change. However, apart from the usual GKS functions,
GKS-3D provides additional ones to handle 3D primitives,
3D input, and 3D viewing.
As for GKS, the standard is in multiple parts with the language
bindings separate from functional specification.
The FORTRAN binding is defined in \cite{bib-gksftn3}.
All primitives within GKS-3D are deemed to be three dimensional.
Thus, although an application using GKS-3D may make only 2D function
calls, all the 2D primitives will be turned immediately into the
corresponding 3D versions inside the package by the addition of
a Z coordinate. This has several noticeable effects:
\begin{UL}
\item 2D function calls may be slightly slower than 3D ones (using the
same GKS-3D package), as a 2D call will add a Z coordinate
and then make the corresponding 3D function call.
\item 2D applications running on GKS-3D will take more storage space
in WDSS and WISS.
\item A 2D picture stored on a 3D metafile will require more space
than on a 2D metafile. Also, quite clearly, {\bf such a picture could
not be re-interpreted by reading the metafile into a GKS (2D)
implementation}.
\end{UL}
\section{The Drawing Primitives}
\index{GKS3D!drawing primitives}
\index{primitives!3D}
With one addition, GKS-3D supports the same GKS primitive types as
described in chapter on Page~\pageref{sec:dprim}.
Four of the functions are called in a similar way to that for GKS-2D,
but with the addition of a third coordinate:
\index{GKS routine!{\protect\tt GPL3}}
\index{GKS routine!{\protect\tt GPM3}}
\index{GKS routine!{\protect\tt GFA3}}
\index{GKS routine!{\protect\tt GGDP3}}
\begin{DLtt}{123456}
\item[Polyline 3D]CALL GPL3(N, PXA, PYA, PZA)
\item[Polymarker 3D]CALL GPM3(N, PXA, PYA, PZA)
\item[Fill Area 3D]CALL GFA3(N, PXA, PYA, PZA)
\item[GDP 3D]CALL GGDP3(N, PXA, PYA, PZA, PRIMID, IDR, DATREC)
\end{DLtt}
However, the 3D routines for Cell Array (GCA3) and Text (GTX3) have
calling sequences which are substantially changed.
GCA3 is not described here, and those brave enough to peruse
the standards document will see why. Concerning GTX3, it should be
emphasized that this function is only required if it is desired to place
text on a surface which is not parallel to the X-Y plane,
otherwise the 2D text function (GTX) is sufficient.
For example, one would need to use GTX3 to write 'MIGROS' on the
side of a truck drawn with some random orientation.
\index{text!3D}
\index{GKS routine!{\protect\tt GTX3}}
The function call for 3D Text is:
\begin{XMP}
CALL GTX3(PX, PY, PZ, TDX, TDY, TDZ, STR)
\end{XMP}
where the arguments are as follows:
\begin{DLtt}{123456}
\item[PX, PY, PZ]
Reference Point (start point of text string)
\item[TDX/Y/Z(2)]
Two Text Direction vectors (three real arrays of length 2).
\item[STR]
The character string to be output.
\end{DLtt}
For both GTX and GTX3 the character string, STR, is drawn in a plane.
The difference is that in the 3D case this {\it text plane} can be
oriented in space using two {\it text direction vectors}, U and V,
specified by TDX(1), TDY(1), TDZ(1), and TDX(2), TDY(2), TDZ(2).
The text is drawn in a plane perpendicular to the vector formed from
taking the vector product U~x~V,
and the origin of the local coordinate system in which the geometric
text attributes are measured is defined by the reference point, P.
The X axis of the local text coordinate system is parallel to the
first direction vector, U, and the Y axis is perpendicular
to this direction. Thus, the system reduces to the 2D case if
U and V are defined as (1,~0,~0) and (0,~1,~0).
An example program using 3D Text is given in Appendix on Page~\pageref{sec:ex3dv}.
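As a small inline sketch, the following call would draw a string in the
X-Z plane, with the text running along X and the character 'up' direction
along Z; the reference point and the string itself are arbitrary choices
for the illustration.
\begin{XMP}
      REAL TDX(2), TDY(2), TDZ(2)
C     U = (1,0,0) and V = (0,0,1) define the text plane
      DATA TDX /1.0, 0.0/, TDY /0.0, 0.0/, TDZ /0.0, 1.0/
      CALL GTX3(1.0, 2.0, 0.0, TDX, TDY, TDZ, 'MIGROS')
\end{XMP}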
The additional primitive, called {\it Fill Area Set}
(or {\it Fill Area Set 3}), generates a set of polygonal
areas which may be hollow or filled with a uniform colour,
pattern, or hatch style.
Thus, it allows the application to specify regions with holes,
or disjoint regions which must be treated as a single entity.
There are both 2D and 3D versions of Fill Area Set, and the
2D function will be added to the next revision of GKS.
Unlike the Fill Area primitive, Fill Area Set and Fill Area Set 3
have separate attributes, described below, to control the edge
style of the polygons.
The calls are:
\index{fill area set}
\index{GKS routine!{\protect\tt GFAS}}
\index{GKS routine!{\protect\tt GFAS3}}
\begin{XMP}
CALL GFAS(NPTR, PTR, PX, PY)
CALL GFAS3(NPTR, PTR, PX, PY, PZ)
\end{XMP}
where the arguments are as follows:
\begin{DLtt}{123456}
\item[NPTR]
(integer) Number of fill areas in set
\item[PTR(NPTR)]
(integer) Array of pointers to starting elements of lists in
PX(N), PY(N), PZ(N). Total number of coordinates is PTR(NPTR)-1.
\item[PX/PY/PZ(*)]
(real) List of points for all fill areas of the set.
\end{DLtt}
Thus, all the polygons are packed into the arrays PX(N), PY(N), PZ(N),
and the start point of each polygon is indicated by the pointers in
the array PTR(NPTR).
\begin{note}
Although they are specified in a 3D space, the primitives Text,
Cell Array, Fill Area, and Fill Area Set are all {\bf coplanar}.
It is the responsibility of the application program to ensure that the
coordinates supplied fulfil this condition.
What happens if they are not coplanar is implementation-dependent!
\end{note}
\section{The Output Attributes}
\index{GKS3D!attributes}
\index{attributes!3D}
As for the primitives, GKS-3D uses almost the same output attributes as
GKS-2D (described in Chapter on Page~\pageref{sec:attrbs}), but with the
three following additions:
\begin{OL}
\item Extra attributes are required for the new Fill Area Set primitive.
\index{view index}
\index{GKS3D!view index}
\index{attributes!view index}
\item The {\it View Index} attribute.
This is analogous to the Normalization Transformation index attribute,
and specifies which viewing transformation is to be used to process
the primitive.
\item The {\it Hidden Line/Hidden Surface} (HLHSR) attribute specifies
which HLHSR algorithm should be used to process the primitive
(if HLHSR is supported by the workstation).
\end{OL}
The Fill Area Set primitive uses the same attributes as Fill Area to
control the interior of the polygons (see section on Page~\pageref{sec:attlst}),
plus the following which allow independent control of the edges:
\index{fill area set}
\index{GKS routine!{\protect\tt GSEDFG}}
\index{GKS routine!{\protect\tt GSEDT}}
\index{GKS routine!{\protect\tt GSEWSC}}
\index{GKS routine!{\protect\tt GSEDCI}}
\begin{DLtt}{123456}
\item[EDFLAG]
the Fill Area Set edge flag (ASF 1). Set by GSEDFG(EDFLAG).
EDFLAG may take the values 'GON' or 'GOFF'.
\item[EDTYPE]
the Fill Area Set edge type (ASF 2). Set by GSEDT(EDTYPE).
EDTYPE is an integer value which is workstation-dependent.
\item[EDWSF]
the Fill Area Set edge width scale factor (ASF 3). Set by GSEWSC(EDWSF).
The value of EDWSF is a real number which modifies the width of the line
used to draw the edge of the Fill Area Set.
\item[EDCI]
the Fill Area Set edge colour index (ASF 4). Set by GSEDCI(EDCI).
\end{DLtt}
The numbers in brackets are, for the GTS-GRAL implementation,
the indices into the integer array ASFLST(4)
used to set the corresponding Attribute Source Flags by calling:
\index{GKS routine!{\protect\tt GASF3}}
\index{attribute source flags}
\begin{XMP}
CALL GASF3(ASFLST)
\end{XMP}
However, the DIS FORTRAN binding uses GASF3 to set all the
Attribute Source Flags in one go by defining the array ASFLST to be of
length 17, where elements 1 to 13 correspond to the 2D case and the last
four elements are those listed here.
Note that the Fill Area Set
primitive is rendered using two independent sets of attributes,
one for the interior, and one for the edge. This separate set of
edge attributes also has its own attribute bundle, selected by calling
GSEDI, plus a corresponding routine to Set the EDge Representation:
\index{GKS routine!{\protect\tt GSEDI}}
\index{GKS routine!{\protect\tt GSEDR}}
\begin{XMP}
GSEDI(EDI)
and
GSEDR(WKID, EDI, EDFLAG, EDTYPE, EDWSF, EDCI)
\end{XMP}
\section{Viewing in 3D}
\index{GKS3D!viewing}
\index{transformation!3D viewing}
\index{viewing pipeline}
Setting up the {\it Viewing Parameters} is undoubtedly the most
complicated part of any 3D graphics system. When primitives are output
to a workstation they (conceptually) pass through a series of processes
called the {\it Viewing Pipeline} before they finally reach the
display surface. This pipeline is briefly described below in order that
the reader is aware of the complete process (see \ref{fig:pipe}).
The transformations will then be covered in more detail.
\begin{OL}
\item The primitives are transformed by the {\it Normalization
Transformation} from World Coordinates (WC3) to Normalized
Device Coordinates (NDC3), which are always in the range [0.0, 1.0].
This transformation is composed of a translation and change of scale,
but no rotation. GKS-3D allows for the existence of many World
Coordinates systems, and their corresponding Normalization
Transformations are numbered from 0 upwards. Normalization
Transformation 0 always corresponds to the identity matrix.
Normalization in 3D is exactly analogous to the 2D case described
in section on Page~\pageref{sec:nortrn}.
\item Primitives which are stored in segments are also processed by the
{\it Segment Transformation} before proceeding to the next stage.
In the 3D case this requires a 3~x~4 matrix which is
described below. The segment transformation maps NDC3 to NDC3,
and includes scaling, rotation, and translation.
\item Having assembled the components in a unique NDC3 space,
primitives may next be clipped to a box to remove extraneous
information. This is called the {\it Normalization Clip}, and may be
switched on or off using the {\it Normalization Clip Flag}.
\item The primitives are now 'viewed' from some arbitrary direction.
The {\it View Orientation Transformation} performs a rotation only
to take Normalized Device Coordinates to View Reference Coordinates
(VRC3). The application is free to calculate the corresponding matrix
itself, or to use a utility routine which is described below.
\item The {\it View Mapping (Projection) Transformation} next takes
View Reference Coordinates to Normalized Projection Coordinates
(NPC3)
in order to provide parallel or perspective projection of the image.
As for the View Orientation Transformation, the application is free
to calculate the required matrix using its own algorithm, or to call a
utility function.
\begin{figure}[h]
\caption{The GKS-3D Viewing Pipeline}
\label{fig:pipe}
\end{figure}
\item At this point the {\it View Clip} takes place.
It is positioned at this stage in the pipeline so that the clip box may be
defined as a rectangular parallelepiped with its sides parallel to
the axes of the NPC3 system, and thus the clipping algorithm is
more efficient. The View Clip is controlled by three {\it Clip Flags}
which allow clipping to be turned on or off separately for the
front plane, back plane, and window.
\item Finally, the {\it Workstation Transformation} takes
Normalized Projection Coordinates to Display Coordinates (DC3) in order
to position the projected image in the device coordinate space. It
preserves the aspect ratio, and includes a clipping operation which
cannot be disabled. As their clip faces are parallel, the View Clip and
Workstation Clip are usually combined internally for efficiency. DC3
coordinates may be in metres or raster units. The Workstation Window
limits are [0,1]x[0,1]x[0,1].
\end{OL}
A good implementation of the graphics pipeline will attempt to combine
as many as possible of the stages in the pipeline using matrix
concatenation in order to reduce the amount of computation necessary.
\subsection{The Normalization Transformation}
\index{normal!transformation}
\index{transformation!3D normalization}
As in the 2D case, this is specified by a Window volume in World
Coordinates, and a Viewport volume in Normalized Device Coordinates.
The Normalization Clip is controlled as for GKS-2D
(see section on Page~\pageref{sec:nrmclp}).
The calls are:
\index{GKS routine!{\protect\tt GSW3}}
\index{GKS routine!{\protect\tt GSV3}}
\begin{XMP}
CALL GSW3(TNR, WN)
CALL GSV3(TNR, VP)
\end{XMP}
where WN and VP are real arrays of dimension 6 containing (XMIN,
XMAX, YMIN, YMAX, ZMIN, ZMAX).
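As a minimal sketch, with purely illustrative window limits, a cube in World
Coordinates could be mapped onto the full NDC3 cube and the transformation
then selected with GSELNT, exactly as in 2D:
\begin{XMP}
      REAL WN(6), VP(6)
C Window volume in WC3 (illustrative values)
      DATA WN / -1.0, 1.0, -1.0, 1.0, -1.0, 1.0 /
C Viewport volume: the full NDC3 cube
      DATA VP /  0.0, 1.0,  0.0, 1.0,  0.0, 1.0 /
      CALL GSW3(1, WN)
      CALL GSV3(1, VP)
C Select normalization transformation 1, as in 2D
      CALL GSELNT(1)
\end{XMP}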
\subsection{The View Orientation Transformation}
\index{view orientation transformation}
\index{transformation!3D view orientation}
The {\it View Orientation Transformation} algorithm provided by a
utility function in the GKS-3D standard performs a rotation in three
dimensions in order to take Normalized Device Coordinates to View
Reference Coordinates (VRC3), where the axes are labeled U, V and N
(see \ref{fig:vrc}).
The definition of the UVN system requires the application to specify:
\begin{OL}
\item The {\it View Reference Point} (VRP), which is a point on or
near the object to be viewed, and is the origin of the VRC3 system.
\item The {\it View Plane Normal} (VPN), which points from the
View Reference Point towards the eye point. The VPN is the third axis
of the VRC3 system, and the plane perpendicular to the View Plane Normal
through the View Reference Point is called the {\it View Reference Plane}.
\item The {\it View Up Vector} (VUV), which defines the direction
to be considered as 'UP' within the View Plane.
It is the second axis of the VRC3 system.
{\bf Clearly, the View Plane Normal and the View Up Vector must not
be defined to be collinear.}
\end{OL}
\index{window}
\index{viewport}
Thus, in the View Reference system, the axis N is along the
View Plane Normal, the axis V is the projection of the View Up Vector
onto the View Reference Plane, and the axis U is constructed to form the
third axis of a right-handed coordinate system.
The default transformation sets the identity matrix.
The limits of the VRC3 system are [0,1]x[0,1]x[0,1].
\begin{figure}[h]
\caption[The GKS-3D View Reference System]%
{The GKS-3D View Reference System.
The View Reference Point, defined in World Coordinates,
should be situated near the object to be viewed.
The View Plane Normal is directed at the eye point.
}
\label{fig:vrc}
\end{figure}
\subsection{The View Mapping (Projection) Transformation}
\index{view mapping transformation}
\index{projection transformation}
\index{transformation!3D view mapping}
The {\it View Mapping (Projection) Transformation} provided by a
utility function in the GKS-3D standard takes View Reference Coordinates
to Normalized Projection Coordinates (NPC3), and the projection may be
either parallel or perspective (see \ref{fig:proj}).
The default View Mapping Transformation sets the identity matrix.
The transformation maps the {\it View Volume} in VRC3 space to a
rectangular parallelepiped in Normalized Projection Coordinates.
The horizontal and vertical boundaries of the View Volume are specified
by the projectors drawn from the {\it Projection Reference Point}
(the centre of projection) to the corners of the {\it View Window},
which is a rectangle parallel to the View Reference Plane,
and with edges parallel to the U and V axes.
The View Window cuts the N axis of the VRC3 system at the
{\it View Plane Distance} (VPD) from the View Reference Point.
The hither (front) and yon (back) boundaries of the View volume are
specified by the {\it Front Plane} and the {\it Back
Plane}, which are planes parallel to the View Reference Plane at
distances from the View Reference Point called the {\it Front Plane
Distance} (FPD) and the {\it Back Plane Distance} (BPD).
The rectangular parallelepiped in NPC3 to which the View Volume is
mapped, called the {\it Projection Viewport Limits}, is
specified by the two points (XMIN, YMIN, ZMIN) and (XMAX, YMAX, ZMAX).
Although the View Mapping operation maps a volume to a volume,
which is slightly different from the usual idea of projection onto a plane,
a little thought should (hopefully) convince the reader that the effect is
equivalent. Consider looking at the result along the N axis; the effect is
that the contents of a truncated pyramid are distorted to map into a
rectangular volume, and thus objects closer to the viewer will be magnified.
For parallel projection, the projectors are parallel to a line drawn from
the Projection Reference Point to the centre of the View Window,
and thus the PRP should be set in the middle of the View Window
to produce an orthographic projection.
In this case the actual Z value of the PRP is unimportant, so long as it is
not identical to the View Plane Distance.
The {\it View Clip} takes place at the edges of the View
Clipping Limits according to the setting of the clipping indicators.
There are three of these: the x-y clipping indicator, the front clipping
indicator, and the back clipping indicator.
The default clipping limits are set to
[0,1]x[0,1]x[0,1] and all clipping indicators set to clip on ('GCLIP').
The View Clipping Limits and the Projection Viewport Limits would normally
be set to the same values, but this is not a requirement.
If they are not coincident, an application can clearly
clip away all of the image by mistake!
\begin{figure}[h]
\caption[The GKS-3D Projection System]%
{The GKS-3D Projection System.
The figure shows the definition of the View Volume with
Perspective Projection.
}
\label{fig:proj}
\end{figure}
\subsection{Setting the 3D View Representation}
\index{view representation in 3D}
Having described the concepts, this section will attempt to explain
how all these parameters are actually set. This is an area of
the GKS-3D standard which was modified fairly late,
and release 2.0 of GKSGRAL-3D still took place before the publication
of the Draft International Standard for the FORTRAN binding
\cite{bib-gksftn3}.
As for the Normalization Transformation, there may be more than one
Viewing Transformation (actually, combined Viewing and Projection
Transformation plus Clip), and the different transformations
are specified by a {\it View Index}.
However, unlike the Normalization Transformation, which applies to all
primitives no matter on which workstation they are displayed,
the Viewing Transformation is workstation-dependent, and so the
same VieW Index (VWI) may produce a different effect on each active display.
The View Index is set using the routine:
\index{GKS routine!{\protect\tt GSVWI}}
\index{view index}
\index{GKS3D!view index}
\index{attributes!view index}
\begin{XMP}
CALL GSVWI(VWI)
\end{XMP}
Following this call all primitives will be transformed according to the
parameters specified by viewing attribute bundle VWI, assuming that
the deferral mode set has allowed the workstation to be brought
up-to-date. The default viewing attributes,
corresponding to VWI~=~0,
define identity matrices for the View Orientation and View Mapping
transformations, and place the clip limits at the boundary of NPC3 space.
As indicated, the attribute values contained in
the viewing attribute bundle specified by VWI must be defined separately
for each workstation using the call:
\index{GKS routine!{\protect\tt GSVWR}}
\begin{XMP}
CALL GSVWR(WKID, VWI, VWM, PRM, VCLP, CLW, CLB, CLF)
\end{XMP}
\begin{DLtt}{123456}
\item[WKID]Workstation Identifier
\item[VWI]The View Index
\item[VWM]The View Orientation Matrix (a~4~x~4 real array),
which may be calculated by the function GEVVWM
\item[PRM]The Projection (View Mapping) Matrix
(a~4~x~4 real array),
which may be calculated by the function GEVPJM
\item[VCLP]The View Clipping Limits (XMIN, XMAX, YMIN, YMAX, ZMIN, ZMAX)
\item[CLW]Clip Indicator for Window Clipping ('GNCLIP', 'GCLIP')
\item[CLB]Clip Indicator for Back Plane Clipping ('GNCLIP', 'GCLIP')
\item[CLF]Clip Indicator for Front Plane Clipping ('GNCLIP', 'GCLIP')
\end{DLtt}
The utility functions provided to evaluate the matrices are EValuate
VieW orientation Matrix and EValuate ProJection (View Mapping) Matrix:
\index{GKS routine!{\protect\tt GEVVWM}}
\begin{XMP}
CALL GEVVWM(VRPX, VRPY, VRPZ, VUPX, VUPY, VUPZ,
VPNX, VPNY, VPNZ, CSW, ERR, VWM)
\end{XMP}
\begin{DLtt}{123456}
\item[VRPX/Y/Z]The View Reference Point in NDC3 or WC3
\item[VUPX/Y/Z]The View Up Vector in NDC3 or WC3
\item[VPNX/Y/Z]The View Plane Normal in NDC3 or WC3
\item[CSW]Switch specifying whether the vectors are given in
World Coordinates or Normalized Device Coordinates ('GWC' or 'GNDC')
\item[ERR](out) Error indicator
\item[VWM](out) View Matrix (a~4~x~4 real array)
\end{DLtt}
\index{GKS routine!{\protect\tt GEVPJM}}
\begin{XMP}
CALL GEVPJM(UMIN, UMAX, VMIN, VMAX, PRVP, PROTYP,
PRPU, PRPV, PRPN, VPD, BPD, FPD, ERR, PRM)
\end{XMP}
\begin{DLtt}{123456}
\item[U, V](MIN,MAX) The limits of the View Window on the
View Reference Plane measured relative to the View Reference Point.
UMIN, VMIN is the bottom left corner, UMAX, VMAX is the top right corner.
\item[PRVP]The Projection Viewport Limits (a 6 element real array
containing XMIN, XMAX, YMIN, YMAX, ZMIN, ZMAX)
\item[PROTYP]The Projection Type flag integer ('GPARL' or 'GPERS')
\item[PRPU/V/N]The Projection Reference Point.
\item[VPD]The View Plane Distance from the View Reference Point
along the N axis.
(Note that the View Reference Point does {\it not} have to be
contained in the View Plane).
\item[BPD, FPD]The Back and Front Plane Distances from the
View Reference Point along the N axis. FPD must be greater than BPD.
\item[ERR](out) Error indicator
\item[PRM](out) Projection Matrix (a~4~x~4 real array)
\end{DLtt}
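The following sketch, with purely illustrative values, shows how these
utilities and GSVWR/GSVWI might be combined to set up a parallel projection
as view index 1 on an already open workstation WKID;
the enumerated constants GNDC, GPARL and GCLIP are assumed to be
available from the ENUM include file:
\begin{XMP}
      REAL VWM(4,4), PRM(4,4), PRVP(6), VCLP(6)
      INTEGER ERR
C Map the View Volume onto the full NPC3 cube and clip to it
      DATA PRVP / 0.0, 1.0, 0.0, 1.0, 0.0, 1.0 /
      DATA VCLP / 0.0, 1.0, 0.0, 1.0, 0.0, 1.0 /
C VRP at (0.5,0.5,0.5) in NDC, View Up Vector along Z,
C View Plane Normal pointing towards the eye at (1,1,1)
      CALL GEVVWM(0.5, 0.5, 0.5,  0.0, 0.0, 1.0,
                  1.0, 1.0, 1.0,  GNDC, ERR, VWM)
C Parallel projection: PRP in the middle of the View Window,
C View Plane through the VRP (VPD=0), Back/Front Planes at -1/+1
      CALL GEVPJM(-0.5, 0.5, -0.5, 0.5, PRVP, GPARL,
                  0.0, 0.0, 5.0,  0.0, -1.0, 1.0, ERR, PRM)
C Install the bundle as view index 1 and bind it to new primitives
      CALL GSVWR(WKID, 1, VWM, PRM, VCLP, GCLIP, GCLIP, GCLIP)
      CALL GSVWI(1)
\end{XMP}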
\subsection{Workstation Transformation}
\index{workstation!transformation 3D}
\index{transformation!3D workstation}
This is specified by a {\it Workstation Window} volume in
Normalized Projection Coordinates, and a {\it Workstation
Viewport} volume in Device Coordinates.
The Workstation Transformation always preserves the aspect ratio,
and the default transformation will be used if the window or viewport
volumes are not correctly set.
\index{GKS routine!{\protect\tt GSWKW3}}
\index{GKS routine!{\protect\tt GSWKV3}}
\begin{XMP}
CALL GSWKW3(WKID, WKWN)
CALL GSWKV3(WKID, WKVP)
\end{XMP}
where WKWN and WKVP are real arrays of dimension 6 containing
(XMIN, XMAX, YMIN, YMAX, ZMIN, ZMAX)
\index{GKS routine!{\protect\tt GQDVOL}}
To inquire the range of device coordinates corresponding to a particular
workstation type, it is possible to call the function GQDVOL
(inQuire Display VOLume).
\begin{XMP}
CALL GQDVOL (WTYPE, ERRIND, DCUNIT, RX, RY, RZ, LX, LY, LZ)
\end{XMP}
The routine returns the units in which the display surface
is measured (DCUNIT), and also the maximum x, y, and z values
in each direction.
Some devices, for example hardcopy plotters, are measured in metres
(DCUNIT='GMETRE'), so the routine will return the actual size of the
device. Other devices, for example graphics terminals,
will be defined in raster or some other units (DCUNIT='GOTHU').
RX, RY, RZ are real device coordinates,
and LX, LY, LZ are in integer raster units.
The use of GQDVOL is analogous to the use of GQDSP,
which is described in section on Page~\pageref{sec:wstntfm}.
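As an illustration of one possible use of this inquiry (WTYPE and WKID
are assumed to be defined elsewhere), the display volume can be used to
construct a Workstation Viewport which maps the NPC3 unit cube onto the
largest cube that fits on the device:
\begin{XMP}
      INTEGER ERRIND, DCUNIT, LX, LY, LZ
      REAL RX, RY, RZ, D, WKWN(6), WKVP(6)
      DATA WKWN / 0.0, 1.0, 0.0, 1.0, 0.0, 1.0 /
C Inquire the display volume of workstation type WTYPE
      CALL GQDVOL(WTYPE, ERRIND, DCUNIT, RX, RY, RZ, LX, LY, LZ)
C Use the largest cube which fits into the display volume
      D = MIN(RX, RY, RZ)
      WKVP(1) = 0.0
      WKVP(2) = D
      WKVP(3) = 0.0
      WKVP(4) = D
      WKVP(5) = 0.0
      WKVP(6) = D
      CALL GSWKW3(WKID, WKWN)
      CALL GSWKV3(WKID, WKVP)
\end{XMP}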
\subsection{But I don't see anything!}
The first (and second and maybe third) time one tries out a 3D application,
even after having checked the code very carefully, there may be nothing
on the screen. The following is a list of possible pitfalls:
\begin{OL}
\item Is the View Reference Point really on or near the object to be viewed?
\item Is it defined in WC or NDC, and does this match the coordinate switch?
\item Is the View Plane Normal pointing the correct way?
A classical bug is to look 180 degrees in the wrong direction.
GKS-3D uses a Right-Handed system, so if the object to be viewed is
at the origin, and is viewed along the Z axis, then one
{\it looks} in the {\bf negative} Z direction, but the VPN
{\it points} in the {\bf positive} Z direction (0.0, 0.0, 1.0).
\begin{note}
As only the direction and not the length of the View Plane Normal vector
matters, one can simply use the coordinates of the eye point to specify this
vector, so long as the View Reference Point is placed at the origin.
\end{note}
\item Assuming one wants an orthographic projection, is the Projection
Reference Point in the middle of the View Window?
\item Has the View Window (UMIN, VMIN), (UMAX, VMAX)
{\it really} been set around the object?
\item Have the Front Plane and Back Plane distances been set correctly
in front and behind the object?
\item Is the aspect ratio of the Normalization Viewport volume the same
as that of the View volume?
\item Are the Clipping Limits which define the projection parallelepiped
correctly set within the NPC system limits? Do the Clipping Limits
and Projection Viewport Limits match?
{\bf It is always safer to start with clipping switched off!}
\item Have the Workstation Window and Workstation Viewport been set
to the same aspect ratio?
\end{OL}
\section{Segments}
\index{segment!3D}
\index{transformation!3D segment}
\index{GKS3D!segments}
Segmentation operates in GKS-3D in the same way as for GKS,
described in section on Page~\pageref{sec:segtfm}, except that the segment
transformation matrix is defined to be 3~x~4.
(3~x~3 for scaling and rotation plus 3~x~1 for
translation).
Thus, the 2D utility routines which calculate a segment
transformation matrix, GEVTM and GACTM, are modified as follows:
\index{GKS routine!{\protect\tt GEVTM3}}
\index{GKS routine!{\protect\tt GACTM3}}
\begin{XMP}
CALL GEVTM3(X0, Y0, Z0, DX, DY, DZ,
ROTX, ROTY, ROTZ, FX, FY, FZ, SW, MXOUT)
CALL GACTM3(MXIN, X0, Y0, Z0, DX, DY, DZ,
ROTX, ROTY, ROTZ, FX, FY, FZ, SW, MXOUT)
\end{XMP}
Similarly to the 2D case, GEVTM3 evaluates a matrix (MXOUT),
whilst GACTM3 accumulates changes to an existing matrix (MXIN).
Both routines require the definition of:
\begin{DLtt}{123456}
\item[X0, Y0, Z0]
(real) a fixed reference point about which 3D rotations take place.
\item[DX, DY, DZ]
(real) a translation (or shift) vector.
\item[ROTX, Y, Z]
(real) angles of rotation about the X, Y, and Z axes.
\item[FX, FY, FZ]
(real) X, Y, and Z scale factors.
\item[SW]
(enumerated) a switch specifying whether the reference point and
shift vector are given in World Coordinates
or Normalized Device Coordinates ('GWC' or 'GNDC').
\item[MXOUT]
(real) 3~x~4 output matrix composed in the
order: scale, rotate, shift.
In the case of GACTM3, the matrix MXIN is pre-concatenated with that
formed from the scale, rotate, and shift parameters, so
MXOUT~=~SHIFT~*~ROTATE~*~SCALE~*~MXIN.
\end{DLtt}
Once the transformation matrix has been evaluated, it may then be
set in the segment by calling the routine:
\index{GKS routine!{\protect\tt GSSGT3}}
\begin{XMP}
CALL GSSGT3(SGNA, MTX)
\end{XMP}
To INsert a SeGment into the output stream GINSG becomes:
\index{GKS routine!{\protect\tt GINSG3}}
\begin{XMP}
CALL GINSG3(SGNA, MTX)
\end{XMP}
Because GKS-3D is upwards compatible to GKS, one can still use the
2D versions of these routines.
In this case, 2~x~3 matrices will be automatically filled out
to 3~x~4 by suitable additions of 0s and 1s.
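As a purely illustrative sketch (the segment number, fixed point and angle
are arbitrary, the rotation angle is assumed to be in radians as for GEVTM,
and GNDC is assumed to come from the ENUM include file), a segment might
be re-oriented as follows:
\begin{XMP}
      REAL MTX(3,4)
C Rotate segment 5 by 0.5236 rad (30 degrees) about the Z axis
C through the NDC point (0.5, 0.5, 0.5); no shift, no scaling
      CALL GEVTM3(0.5, 0.5, 0.5,  0.0, 0.0, 0.0,
                  0.0, 0.0, 0.5236,  1.0, 1.0, 1.0, GNDC, MTX)
      CALL GSSGT3(5, MTX)
\end{XMP}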
\begin{note}
GKS segment transformations are provided in order to orient the
contents of segments with respect to the coordinate system in which
their primitives were originally specified. In most cases {\bf it is
extremely inefficient to modify the transformations in each segment
in order to view a scene from a different direction}. The viewing
transformation should be used for this purpose.
\end{note}
\section{Graphical Input}
\index{GKS3D!input}
\index{input!for 3D}
As only the Locator and Stroke logical devices return coordinate
values, only these two have 3D versions of the routines used
for the three input modes. Thus, for request mode, the calls are:
\begin{XMP}
CALL GRQLC3(WKID, LCDNR, STAT, TNR, VWI, PX, PY, PZ)
and
CALL GRQSK3(WKID, SKDNR, STAT, N, TNR, VWI, NP, PX, PY, PZ)
\end{XMP}
where, apart from the extra dimension, PZ, the only difference from the
2D calls is the addition of the View Index, VWI.
(Note that as for TNR, VWI is an output parameter.)
This corresponds to the index of the viewing attribute bundle used to
convert the position(s) back from NPC3 coordinates to NDC3 coordinates.
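A short sketch of a 3D locator request might therefore look as follows
(WKID and the status constant GOK from the ENUM include file are assumed):
\begin{XMP}
      INTEGER STAT, TNR, VWI
      REAL PX, PY, PZ
      CALL GRQLC3(WKID, 1, STAT, TNR, VWI, PX, PY, PZ)
      IF (STAT .EQ. GOK) THEN
C (PX,PY,PZ) is returned in the WC3 system of normalization
C transformation TNR, converted back via view index VWI
      ENDIF
\end{XMP}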
Of course, when using a physical 2D device, it is awkward for the user
to provide the third coordinate for 3D input.
Although only Locator and Stroke have 3D functions to obtain input,
all six logical devices have 3D versions of the initialization
functions. For example, these allow the echo areas to be positioned
in space, rather than in a plane. They will not be discussed
further in this Primer.
\section{GKS-3D Metafile}
\index{metafile!for 3D}
\index{GKS3D!metafile}
\index{Appendix E metafile}
\index{CGM}
As for GKS, the GKS-3D standard has an Appendix E metafile.
The logical format of the 2D and 3D Appendix E metafiles are the
same. However, the contents are incompatible, as in one
case points are represented by two values, and in the other by
three values. The two types of metafile are clearly distinguishable
by inspecting the metafile header record. In all other respects,
the control and usage of the 2D and 3D metafiles are the same.
The Computer Graphics Metafile, CGM, will not initially have a 3D
version. Therefore, if a CGM metafile driver is added to GKS-3D,
the output could contain only a 2D projection. This would be sufficient
to make a hardcopy, but only with the viewing parameters chosen
when the metafile was created.
\begin{note}
The GKS-3D Appendix E metafile has never been implemented by
GTSGRAL. Nevertheless, an additional output workstation is
provided to produce a 2D
metafile by carrying out the viewing and projection operations before
outputting data to the file. This feature is useful for making hardcopies.
\end{note}
\chapter{\protect\label{sec:refhint}General Hints for GKS Use}
\index{hints}
\section{System and Implementation Dependencies}
As mentioned elsewhere in this document certain features of GKS and GKS-3D
are system or implementation dependent. System dependencies are described
in the manual appropriate for the implementation in use.
The principal implementation dependencies to watch for are text fonts and
fill area hatch styles,
but it is also possible to run into difficulties by inadvertently trying to
exceed some maximum value, such as a table length.
GKS has many inquiry functions which may be used to discover the current
and/or maximum value of various parameters,
and it is not the intention of the Primer to describe all of them.
However, one should take note of the following routines:
\index{GKS routine!{\protect\tt GQWKM}}
\index{GKS routine!{\protect\tt GQMNTN}}
\index{GKS routine!{\protect\tt GQLWK}}
\index{GKS routine!{\protect\tt GQLWK3}}
\index{GKS routine!{\protect\tt GQSGS}}
\index{GKS routine!{\protect\tt GQSGP}}
\begin{XMP}
CALL GQWKM(ERRIND, MXOPWK, MXACWK, MXWKAS)
CALL GQMNTN(ERRIND, MAXTNR)
CALL GQLWK(WTYPE, ERRIND, MPLBTE, MPMBTE, MTXBTE, MFABTE, MPAI, MCOLI)
or
CALL GQLWK3(WTYPE, ERRIND, MPLBTE, MPMBTE, MTXBTE, MFABTE, MPAI,
MEDBTE, MCOLI, MVTE)
CALL GQSGP(WTYPE, ERRIND, NSGP)
\end{XMP}
where the parameters are as follows:
\begin{DLtt}{123456}
\item[WTYPE]workstation type (input parameter)
\item[ERRIND]error number
\item[MXOPWK]maximum number of simultaneously open workstations
\item[MXACWK]maximum number of simultaneously active workstations
\item[MXWKAS]maximum number of workstations associated with a segment
\item[MAXTNR]maximum normalization transformation number
\item[MPLBTE]maximum number of polyline bundle table entries
\item[MPMBTE]maximum number of polymarker bundle table entries
\item[MTXBTE]maximum number of text bundle table entries
\item[MFABTE]maximum number of fill area bundle table entries
\item[MPAI]maximum number of pattern indices
\item[MEDBTE]maximum number of edge bundle table entries
\item[MCOLI]maximum number of colour indices
\item[MVTE]maximum number of view table entries
\item[NSGP]maximum number of segment priorities
\end{DLtt}
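For example, a module which intends to define its own colour table entries
might first check the limit for the workstation type in use
(a sketch; the value 8 is arbitrary):
\begin{XMP}
      CALL GQLWK(WTYPE, ERRIND, MPLBTE, MPMBTE, MTXBTE,
                 MFABTE, MPAI, MCOLI)
      IF (ERRIND .EQ. 0 .AND. MCOLI .GE. 8) THEN
C Enough colour table entries: define indices 1 to 8 here
      ENDIF
\end{XMP}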
There is unfortunately no function provided to inquire the maximum available
number of segments or the maximum available segment name, so one must consult
the relevant documentation.
\section{Integrating separately written modules of code.}
As is the case when any independent software modules are combined into a
single program, care must be taken that on entry a module saves the
state of the current environment and sets up its own defaults.
The original environment must then be restored on exiting from
the module. This applies as much to saving and restoring (parts of) the
GKS State List and Workstation State Lists when entering a graphics module
as it does to saving and restoring registers when making a subroutine call.
For example, two modules of graphics code may use the same Normalization
Transformation indices. If module B sets different windows and viewports
than module A, then on re-using routines in A after calling module B the
transformations will no longer produce the expected results.
GKS provides a mechanism to handle this situation in the form of a large set
of inquiry functions. These enable a module of code to inquire at run time the
values of those parameters it intends to modify in order that they may
be correctly restored afterwards. In particular, functions exist
to inquire attribute values and bundle table entries, values of the
aspect source flags, and the normalization and viewing (for GKS-3D)
transformations corresponding to a particular normalization or view index.
As an example:
\begin{XMP}
----
REAL WINDOW(4)
REAL VIEWPT(4)
----
C Inquire current window and viewport for Transformation Number X
C
CALL GQNT(TNX, ERRIND, WINDOW, VIEWPT)
CALL GSWN(TNX, WXMINA, WXMAXA, WYMINA, WYMAXA)
----
C Restore window which was modified for Transformation Number X
C
CALL GSWN(TNX, WINDOW(1), WINDOW(2), WINDOW(3), WINDOW(4))
----
\end{XMP}
If several modules of code which are to be combined use GKS segments then
clearly they must not both attempt to use the same segment names.
Either one can agree before hand on the allocation of a range of names
to each module, or else code can be written which assigns segment names
at run time after checking that the values are not in use
(see section on Page~\pageref{sec:segnam}).
\subsection{Using GKS-3D libraries for GKS (2D) applications}
As GKS-3D is a super-set of GKS it is possible to run a GKS 2D application
linked to a GKS-3D library. However, if a 2D code module is incorporated
into a larger 3D program then the programmer must be aware that primitives
will be bound to the current View Index {\bf as well as} the current
Normalization Transformation Index, possibly entailing 3D transformations
plus view clipping.
Thus, to be sure that the code within the 2D module will behave as if linked
to a 2D library the View Index should be set to 0 (identity matrix) in the
3D code before calling the 2D module.
This cannot be done in the 2D module itself, as otherwise the linker would give
an error when the module is linked against a 2D GKS library, which does not
include the 3D function GSVWI.
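For example, the 3D part of the program might contain something like the
following, where PLOT2D stands for a hypothetical 2D module:
\begin{XMP}
C Select the identity viewing bundle before entering the 2D module
      CALL GSVWI(0)
      CALL PLOT2D
\end{XMP}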
\section{\protect\label{sec:refintw}Plotting numbers as text strings}
For FORTRAN programmers it is possible to use the {\it Internal Write}
construct to convert numeric variables to character strings for output as
text primitives or via GMSG:
\begin{XMP}
REAL rvar
CHARACTER str*11
----
C Set Variable
rvar = v1 * v2 / v3
C Convert to a character string
WRITE(str, '(''RVAR= '',F5.1)') rvar
CALL GMSG(wkid, str)
----
\end{XMP}
\section{\protect\label{sec:gkspref}GKSPACK}
\index{GKSPACK}
GKSPACK contains routines which may be placed into several categories.
\begin{UL}
\item
One category of routines is intended to ease the use of GKS for
those people with simple applications who do not need further GKS features,
NAG graphics users for example. These routines are built on top
of GKS and are not implementation dependent. They include
routines to initialize and stop GKS (GCINIT, GCSTOP),
to ask the user for input (GPRMPT), etc.
\item
The next category of routines provide facilities to allow applications
which must run on several GKS implementations to obtain information
about workstation types and connection identifiers from a data file.
This avoids having to ask the user interactively to supply implementation
dependent values. These routines include GCGIMP, GCGWTC, etc.
\item
Some routines provide access to GKS features which might be awkward to use,
or which may be implementation dependent. For example, use of GDPs,
or the construction of menus. GKSGRAL includes a large number of utility
routines of this type starting with the sentinel characters {\bf GU},
and GKSPACK contains CERN-written emulations of many of them for
use with other GKS implementations.
\item
Another category of routines is for non-standard GKS utilities which allow the
user to perform specific actions not foreseen by the GKS standard, and which may
require internal knowledge of the GKS implementation.
Thus, it may not be possible to implement these routines for GKS
implementations other than the one from GTS-GRAL if access to the source
code is not available.
The principal examples of routines in this category, heavily used
at CERN by PAW for instance, are GCATOG and GCGTOA. These are used to switch
the graphics terminal between graphics and alphanumeric mode in order for
applications to intermix graphics and Fortran I/O on the same device.
\end{UL}
For various technical reasons it has been decided to include entry points
for all routines in GKSPACK in the GTS-GRAL libraries%
\footnote{At the time of preparing this document there
is a possibility that there may be
a delay in introducing a few of the GKSPACK routines into
the Apollo library due to the change over to SR~10.}
maintained by CERN.
This means that users of GKSGRAL do {\bf not} need to link to a separate
library in order to have access to GKSPACK routines, which is a change to the
previous situation when it was necessary to link applications
also with the CERN library 'NGRAFLIB'.
For users of other GKS implementations a PAM file, named GKSPACK,
is available.
Initially, apart from GKSGRAL, this has support only for DECGKS.
The PAM file distributed by the CERN Program Library will be
included also on the GTS-GRAL distribution tape, and is available at CERN
as follows:
\begin{DLtt}{123456}
\item[IBM:]via \Ucom{GIME CERNPAMS}
\item[VXCERN:]in \Lit{CERN_ROOT:[PAM]GKSPACK.PAM}
\item[Apollo:]in \Lit{/cern/pro/pam/gkspack.pam}
\item[UNIX:]in \Lit{$CERN/pro/pam/gkspack.pam}
\end{DLtt}
The compiled version for DECGKS is available at CERN:
\begin{DLtt}{123456}
\item[VXCERN:] \Lit{GKS_ROOT:[LIB]GKSPACK_DEC.OLB}
\end{DLtt}
As mentioned above, GKSPACK includes some routines for which it may not
be possible to produce correctly working versions for implementations
other than GKSGRAL.
For example, this is the situation for GCATOG and GCGTOA with DECGKS.
In these cases GKSPACK contains dummy routines with the correct
calling sequences, but which write out an error message to a file
named GKSPACK.ERR.
In fact, for GCATOG and GCGTOA, calling the dummy routines
when using a terminal%
\footnote{As opposed to using a workstation where the
Fortran and graphics I/O are directed at different windows.}
actually stops the application program as well. This is because if the
application mixes Fortran I/O and graphics without using correctly
working versions of GCATOG and GCGTOA then the terminal will probably
block and need to be reset.
(If used via VM/CMS this also can block the communications.)
The GKSPACK routines which provide access to the implementation
dependent values for workstation types and connection identifiers
require access to a data file.
Examples of this file are distributed with GKSPACK,
and at CERN are available as follows:
\begin{DLtt}{123456}
\item[IBM]The file is called \Lit{GKS$IMPL DAT}.
A GTS-GRAL GKS version is available on the 'Q' disk, but users may provide
their own if they wish.
\item[VAX]The file is available via the Logical Name
\Lit{GKS_IMPLEMENTATION}.
Both GTS-GRAL and DECGKS versions of the file exist,
and users must assign the one they wish to use.
\begin{XMP}
GKS_ROOT:[DAT]GKS_IMPLEM.GTS
GKS_ROOT:[DAT]GKS_IMPLEM.DEC
\end{XMP}
\item[UNIX]The file is accessed via \Lit{$GKS_IMPLEM},
which must be defined as an environment variable.
A GTS-GRAL GKS version called \Lit{gks_implem.gts} is available in
\Lit{$gkshome}, or in \Lit{/cern/gks/pro/dat}.
\end{DLtt}
An example of such a file would be:
\begin{XMP}
DECGKS
NB WKTYPES 5
PSTSCR 61 9
T4107 82 0
VT340 17 0
VXUIS 41 0
WISS 5 1
\end{XMP}
\subsection{List of routines}
\begin{OL}
\item \Lit{CALL GCATOG(WKID)} or \Lit{CALL GCGTOA(WKID)}
\index{GKSPACK!{\protect\tt GCATOG}}
\index{GKS routine!{\protect\tt GCATOG}}
\index{GKSPACK!{\protect\tt GCGTOA}}
\index{GKS routine!{\protect\tt GCGTOA}}
{\bf Input:}
\begin{DLtt}{123456}
\item[WKID]GKS workstation identifier of the terminal (INTEGER).
\end{DLtt}
\Lit{GCGTOA} and \Lit{GCATOG} work only on GKSGRAL with the CERN-supplied
driver versions as the routines require modifications to the driver code.
The DECGKS version tests the workstation type; if it is VXUIS or VXXW
it does nothing, otherwise it writes an error message on the file
GKSPACK.ERR.
The routines change a terminal from graphics to alpha mode (GCGTOA)
and from alpha to graphics mode (GCATOG). The terminal must be an
activated GKS workstation.
Thus they allow the application to perform FORTRAN I/O to
the terminal during a graphics session. The effect on the terminal
depends on its capabilities. For example, as Pericom PG terminals
do not have a dialog area, calling GCGTOA causes the bell to ring
and the program then halts until the operator presses return.
This gives the user time to look at the image drawn in graphics
mode before switching the screen to the alpha-numeric bit plane.
However, on terminals with a dialog area (e.g. Pericom
MG series and Tektronix 4107 compatible terminals),
the FORTRAN I/O will appear immediately overlayed with the graphics.
After the FORTRAN I/O has been performed the application
{\bf must} call GCATOG to reset the graphics environment.
Note that GCGTOA returns immediately, without waiting for a user
action. Thus, if one wishes the program to wait until the user has read
a message and is ready to continue, then it is essential to include
a READ statement, as the following program fragment illustrates:
\begin{XMP}
CALL GCGTOA (WKID)
WRITE(*,*) ' GIVE X COORDINATE: '
C Wait for a reply
READ (*,*) X
CALL GCATOG (WKID)
\end{XMP}
\item {\bf CALL GCCDWN(WKID, RWIN, TITLE)}
\index{GKSPACK!{\protect\tt GCCDWN}}
\index{GKS routine!{\protect\tt GCCDWN}}
This routine is described fully in section on Page~\pageref{sec:vstnref}.
The DECGKS version is a dummy.
{\bf Input:}
\begin{DLtt}{123456}
\item[WKID (I)]Workstation Identifier
\item[RWIN (R*4)]Window Size
\item[TITLE (C)]Window title
\end{DLtt}
\item {\bf CALL GCINIT(IDTERM, TERMTP, IDFILE, LUFILE, KERFIL)}
\index{GKSPACK!{\protect\tt GCINIT}}
\index{GKS routine!{\protect\tt GCINIT}}
{\bf Input:}
\begin{DLtt}{123456}
\item[IDTERM]GKS workstation ID for terminal (INTEGER).
If $\leq0$ or in batch no terminal is assigned.
\item[TERMTP]GKS terminal workstation type (INTEGER).
Note that this number depends on the terminal {\it and} the
GKS implementation you are using.
\item[IDFILE]GKS workstation ID for disk file output (INTEGER).
No file is output if $\leq0$.
\item[LUFILE]FORTRAN logical unit number for disk file output (INTEGER).
Not used if IDFILE $\leq0$.
\item[KERFIL]FORTRAN logical unit number for the GKS error file (INTEGER).
\end{DLtt}
GCINIT provides an easy initialization of GKS for interactive or
batch applications.
If TERMTP=0 GCINIT will prompt the interactive user for the
terminal type and connection ID.
LUFILE and KERFIL are Fortran logical unit numbers and no file names
are assigned as these can easily be provided by the user.
Note that the logical unit numbers 91, 92, 93 are reserved for
GKSGRAL and, to avoid nasty surprises, do not use 5, 6 or 7.
The current values for LUFILE and their effect are:
\begin{XMP}
LUFILE<=0 An interactive dialog will guide the user.
0<LUFILE<100 GKS Appendix E metafile on unit LUFILE.
100<LUFILE<200 PostScript file on unit LUFILE-100.
200<LUFILE<300 Encapsulated PostScript file on LUFILE-200.
1000<LUFILE<1100 Tektronix 4014 style file on unit LUFILE-1000
\end{XMP}
For example, LUFILE=109 would produce PostScript output on the
Fortran file defined by logical unit=9.
\begin{note}
Output of Tektronix 4014 escape codes is available under VMS and Aegis.
It can be provided under VM/CMS if requested.
\end{note}
\item {\bf CALL GCGIMP (MAXTYP, GKSNWT, GKSSYN, GKSWCN, GKSWTP)}
\index{GKSPACK!{\protect\tt GCGIMP}}
\index{GKS routine!{\protect\tt GCGIMP}}
Supplies information in implementation file.
See introduction to this section for file naming details.
{\bf Input:}
\begin{DLtt}{123456}
\item[MAXTYP (I)]maximum number of wk types
(i.e. dimension of output arrays)
\end{DLtt}
{\bf Output:}
\begin{DLtt}{123456}
\item[GKSNWT (I)]number of wk types in the implementation file
\item[GKSSYN (C*6)]array (dimension MAXTYP) with the workstation type names
(e.g. "VXUIS~")
\item[GKSWCN (I)]array (dimension MAXTYP) with the workstation connection id's
\item[GKSWTP (I)]array (dimension MAXTYP) with the workstation type integer
values (e.g. 41)
\end{DLtt}
\item {\bf CALL GCIMPL(IMPLEM)}
\index{GKSPACK!{\protect\tt GCIMPL}}
\index{GKS routine!{\protect\tt GCIMPL}}
Supplies implementation name string.
{\bf Output:}
\begin{DLtt}{123456}
\item[IMPLEM (C*6)]Name of implementation
\end{DLtt}
\item {\bf CALL GCGWTC(WKTYP, CONID)}
\index{GKSPACK!{\protect\tt GCGWTC}}
\index{GKS routine!{\protect\tt GCGWTC}}
Asks the user to enter interactively a workstation type, and
provides the list of the available workstation types if the user enters '?'.
Automatically returns to the program the connection id to be used
for the selected workstation type.
{\bf Output:}
\begin{DLtt}{123456}
\item[WKTYP (I)]Workstation Type
\item[CONID (I)]Connection Id
\end{DLtt}
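For example (the workstation identifier 1 is arbitrary):
\begin{XMP}
      INTEGER WKTYP, CONID
C '?' at the prompt lists the available workstation types
      CALL GCGWTC(WKTYP, CONID)
      CALL GOPWK(1, CONID, WKTYP)
      CALL GACWK(1)
\end{XMP}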
\item {\bf CALL GCNAME(WKID, STRING)}
\index{GKSPACK!{\protect\tt GCNAME}}
\index{GKS routine!{\protect\tt GCNAME}}
{\bf Input:}
\begin{DLtt}{123456}
\item[WKID (I)]GKS workstation identifier of the metafile
receiving the frame (INTEGER).
\item[STRING (C*7)]The name of the frame (CHARACTER*(*)).
\end{DLtt}
GCNAME enables the user to give the name 'STRING' to the current frame
when output to a metafile for inclusion in compound documents.
The name 'STRING' may also be used by GRVIEW and GRCONV
to select frames on the metafile.
Users of this facility {\bf must} place the call to GCNAME before
outputting any primitives to a new frame.
For examples of its use, see on Page~\pageref{sec:gcnref} and on Page~\pageref{sgmlgra}.
{\bf Restrictions:} Only the first 7 characters of 'STRING' are used.
If less than 7 characters are given 'STRING' will be padded with
\Lit{'$'}s (dollars).
For VM/CMS system reasons, 'STRING' may contain only upper-case
alphabetic characters, the digits 0-9, and the dollar
sign (\Lit{'$'}); lower-case characters are converted to upper-case.
\item {\bf CALL GCQWKN(WKID, IERR, CONID, WKNAM)}
\index{GKSPACK!{\protect\tt GCQWKN}}
\index{GKS routine!{\protect\tt GCQWKN}}
Analogous to the routine GQWKC, but returns a workstation type name
instead of a workstation type integer value.
{\bf Input:}
\begin{DLtt}{123456}
\item[WKID (I)]Workstation Id
\end{DLtt}
{\bf Output:}
\begin{DLtt}{123456}
\item[IERR (I)]value returned by GQWKC
\item[CONID (I)]Connection Id
\item[WKNAM (C*6)]Workstation Name (e.g. "VXUIS~")
\end{DLtt}
\item {\bf CALL GCSDWN(WKTYP, RWIN, TITLE)}
\index{GKSPACK!{\protect\tt GCSDWN}}
\index{GKS routine!{\protect\tt GCSDWN}}
This routine is described fully in section on Page~\pageref{sec:vstnref}.
The DECGKS version is a dummy.
{\bf Input:}
\begin{DLtt}{123456}
\item[WKTYP (I)]Workstation Type
\item[RWIN (R*4)]Window Size
\item[TITLE (C)]Window title
\end{DLtt}
\item {\bf CALL GCSTOP}
\index{GKSPACK!{\protect\tt GCSTOP}}
\index{GKS routine!{\protect\tt GCSTOP}}
GCSTOP deactivates and closes all GKS workstations and closes GKS.
\item {\bf CALL GCWTPC(WKTYP, WKTSYN)}
\index{GKSPACK!{\protect\tt GCWTPC}}
\index{GKS routine!{\protect\tt GCWTPC}}
Returns the workstation type name corresponding to a workstation
type integer value.
{\bf Input:}
\begin{DLtt}{123456}
\item[WKTYP (I)]Workstation Type (e.g. 41 for DECGKS)
\end{DLtt}
{\bf Output:}
\begin{DLtt}{123456}
\item[WKTSYN (C*6)]Workstation Type Name (e.g. "VXUIS~")
\end{DLtt}
\item {\bf CALL GCWTPI(WKTSYN, CONID, WKTYP)}
\index{GKSPACK!{\protect\tt GCWTPI}}
\index{GKS routine!{\protect\tt GCWTPI}}
Get workstation type and connection id corresponding to a given
workstation type name (e.g. 'VXXW').
{\bf Input:}
\begin{DLtt}{123456}
\item[WKTSYN (C*6)]Workstation Type Name
\end{DLtt}
{\bf Output:}
\begin{DLtt}{123456}
\item[WKTYP (I)]Workstation Type
\item[CONID (I)]Connection Id
\end{DLtt}
\item {\bf CALL GPRMPT(WKID, PROMPT, LSTRI, REPLY)}
\index{GKSPACK!{\protect\tt GPRMPT}}
\index{GKS routine!{\protect\tt GPRMPT}}
{\bf Input:}
\begin{DLtt}{123456}
\item[WKID]GKS workstation identifier of the terminal (INTEGER).
\item[PROMPT]Application prompt (CHARACTER *(*)).
\end{DLtt}
{\bf Output:}
\begin{DLtt}{123456}
\item[LSTRI]Length of reply (INTEGER).
\item[REPLY]User reply (CHARACTER).
\end{DLtt}
GPRMPT gives a prompt and waits for a reply from the user in a GKS
interactive graphics program.
As an example, GPRMPT could be used to hold a picture on
the screen until the user was ready to view the next one,
or terminate the program.
If the user hits only a Carriage Return then LSTRI=0 and 'REPLY' is not
defined. For example:
\begin{XMP}
CALL GPRMPT(WKID, 'Type RETURN or QUIT', LSTRI, REPLY)
IF(LSTRI.GT.0) THEN
C Call STOPPG to do whatever you want to do on QUIT
IF (REPLY(1:4) .EQ. 'QUIT') CALL STOPPG
ENDIF
\end{XMP}
Currently the prompt is put into a GKS segment which is deleted
after the reply. If a segment is open when GPRMPT is called,
the prompt will be added to it but it will not be deleted. This
could be confusing and should be avoided by closing an open
segment before calling GPRMPT.
If the workstation is not a terminal or the job is in batch
GPRMPT does nothing.
{\bf Restrictions:} If GPRMPT is used repeatedly within one picture,
the prompts will overprint if the terminal does not have selective erasure.
If long prompts and/or small workstation windows are used the
prompt will be truncated.
\item {\bf CALL GRQSK(WKID, LDSTK, NMAX, ISTATS, IT, NP, PX, PY)}
\index{GKSPACK!{\protect\tt GRQSK}}
\index{GKS routine!{\protect\tt GRQSK}}
Emulates the GTSGRAL request stroke (locator loop)
which requires a button push to input each point.
This is in contrast to the DECGKS implementation of GRQSK
which reads the current cursor position in a loop
with fixed time or position intervals.
\begin{note}
If it is intended to use this routine to replace the version of GRQSK in the
GKS library it must be linked ahead of the library.
\end{note}
{\bf Input:}
\begin{DLtt}{123456}
\item[WKID (I)]Workstation Identifier
\item[LDSTK (I)]Stroke logical device
\item[NMAX (I)]Maximum number of points
\end{DLtt}
{\bf Output:}
\begin{DLtt}{123456}
\item[ISTATS (I)]Status
\item[IT (I)]Normalization Transformation Number
\item[NP (I)]Number of points returned
\item[PX (R*NMAX)]X coordinates
\item[PY (R*NMAX)]Y coordinates
\end{DLtt}
\item {\bf CALL GUARC(XM,YM,XP,YP,XH,YH,XQ,YQ)}
\index{GKSPACK!{\protect\tt GUARC}}
\index{GKS routine!{\protect\tt GUARC}}
Emulation of GTS-GRAL utility to draw a circular arc defined by 4 points.
{\bf Input:}
\begin{DLtt}{123456}
\item[XM, YM]Mid point of arc
\item[XP, YP]Start point
\item[XH, YH]Point on arc
\item[XQ, YQ]Point on end radius of arc
\end{DLtt}
\item {\bf CALL GUBEZ1(N,XP,YP)}
\index{GKSPACK!{\protect\tt GUBEZ1}}
\index{GKS routine!{\protect\tt GUBEZ1}}
Emulation of GTS-GRAL utility to draw a Bezier curve defined by a
Bezier polygon.
{\bf Input:}
\begin{DLtt}{123456}
\item[N (I)]Dimension of XP, YP
\item[XP, YP]Points on Bezier polygon
\end{DLtt}
\item {\bf CALL GUCIR1(XM,YM,XP,YP)}
\index{GKSPACK!{\protect\tt GUCIR1}}
\index{GKS routine!{\protect\tt GUCIR1}}
Emulation of GTS-GRAL utility to draw a circle defined by
(midpoint, peripheral point).
{\bf Input:}
\begin{DLtt}{123456}
\item[XM, YM]Mid point of circle
\item[XP, YP]Peripheral point on circle
\end{DLtt}
\item {\bf CALL GUCIR2(XM,YM,R)}
\index{GKSPACK!{\protect\tt GUCIR2}}
\index{GKS routine!{\protect\tt GUCIR2}}
Emulation of GTS-GRAL utility to draw a circle defined by
(midpoint, radius).
{\bf Input:}
\begin{DLtt}{123456}
\item[XM, YM]Mid point of circle
\item[R]Radius
\end{DLtt}
\item {\bf CALL GUCUR1(N,XP,YP)}
\index{GKSPACK!{\protect\tt GUCUR1}}
\index{GKS routine!{\protect\tt GUCUR1}}
Emulation of GTS-GRAL utility to draw a curve defined by interpolating points.
{\bf Input:}
\begin{DLtt}{123456}
\item[N (I)]Dimension of XP, YP
\item[XP, YP]Points in polygon.
\end{DLtt}
\item {\bf CALL GUELL1(XM,YM,A,B)}
\index{GKSPACK!{\protect\tt GUELL1}}
\index{GKS routine!{\protect\tt GUELL1}}
Emulation of GTS-GRAL utility to draw an ellipse defined by
(midpoint, semi-axes).
An ellipse is drawn with midpoint XM,YM; the length of semi-axis
in the X-direction is A, and in the Y-direction is B.
{\bf Input:}
\begin{DLtt}{123456}
\item[XM, YM] Midpoint of Ellipse
\item[A, B]Semi-axes of ellipse in X and Y directions
\end{DLtt}
\item {\bf CALL GUELL2(XM,YM,A,B,BEGRAD,ENDRAD,ROTATE)}
\index{GKSPACK!{\protect\tt GUELL2}}
\index{GKS routine!{\protect\tt GUELL2}}
Emulation of GTS-GRAL utility to draw an elliptical arc specified by the
midpoint XM,YM, the size of the semi-axes in direction X and Y (A, B),
and \Lit{BEGRAD} and \Lit{ENDRAD} which define the radius of the start and end points.
The ellipse is rotated with angle ROTATE in an anti-clockwise direction.
{\bf Input:}
\begin{DLtt}{123456}
\item[XM, YM] Midpoint of Ellipse
\item[A, B]Semi-axes of ellipse in X and Y directions
\item[BEGRAD]Angle of arc start point
\item[ENDRAD]Angle of arc end point
\item[ROTATE]Angle of anti-clockwise rotation
\end{DLtt}
\item {\bf CALL GUMEN2(WKID,DNR,CHECXL,CHECXH,CHECYL,CHECYH,MENU)}
\index{GKSPACK!{\protect\tt GUMEN2}}
\index{GKS routine!{\protect\tt GUMEN2}}
Emulation of GTS-GRAL utility to define a menu in a given echo area.
{\bf Input:}
\begin{DLtt}{123456}
\item[WKID (I)]Workstation Id
\item[DNR (I)]Device Number
\item[CHECXL]Echo area X Lower Bound
\item[CHECXH]Echo area X Higher Bound
\item[CHECYL]Echo area Y Lower Bound
\item[CHECYH]Echo area Y Higher Bound
\item[MENU (C)]String of menu items separated by ',' and terminated
by '.'.
\end{DLtt}
\item {\bf CALL GUNERR(N)}
\index{GKSPACK!{\protect\tt GUNERR}}
\index{GKS routine!{\protect\tt GUNERR}}
Dummy routine which writes an error message to the file GTSTODEC.ERR.
{\bf Output:}
\begin{DLtt}{123456}
\item[N]Number of GKS errors which occurred.
\end{DLtt}
\item {\bf CALL GUSIGD(FLAG)}
\index{GKSPACK!{\protect\tt GUSIGD}}
\index{GKS routine!{\protect\tt GUSIGD}}
Dummy routine which writes an error message to the file GTSTODEC.ERR.
{\bf Input:}
\begin{DLtt}{123456}
\item[FLAG (L)]Set (.TRUE.) or reset (.FALSE.) simulation flag.
\end{DLtt}
\end{OL}
\subsection{GKSPACK Error Messages}
\index{GKSPACK!Error Messages}
\begin{DLtt}{123456}
\item[GTSGRAL]
If GOPKS has been called before any call to GKSPACK then any GKSPACK
errors will be written to the GKS error file. Otherwise, if an error occurs
from a call to a GKSPACK routine preceding the GOPKS call, then the error
message will be sent to the screen.
\item[DECGKS]
All errors are written to GKSPACK.ERR.
\end{DLtt}
The list of GKSPACK error messages is as follows:
\begin{DLtt}{123456}
\item[-1]Cannot open implementation file.
The file does not exist or: (VMS) the logical name \Lit{GKS_IMPLEMENTATION}
is not correctly defined; or (UNIX) the environment variable \Lit{GKS_IMPLEM}
is not correctly defined.
\item[-2]Internal error in GCGIMP. The arrays are too small to contain
the information for all the workstations listed in the implementation file.
Contact Graphics Section at CERN (CN/US/GR).
\item[-3]The user arrays are too small to contain the information for all
the workstations listed in the implementation file.
\item[-4]The GKSPACK routine called is a dummy for this GKS implementation.
\item[-5]Error in GCNAME: invalid characters in frame name.
\end{DLtt}
\subsection{GKSPACK Example Program}
\index{GKSPACK!Example Program}
The following complete program illustrates the use of some
routines available in GKSPACK:
\begin{XMP}
PROGRAM DEMOC1
*
* Include file defines GKSGRAL workstation types
*
INCLUDE 'GKS$GTSDEV'
CHARACTER*80 REPLY
REAL XSINX(51),XSINY(51)
REAL XTWOPI
DATA XTWOPI/6.28318/
*
* DEFINE THE WORKSTATION TYPE (Pericom MG600) AND METAFILE
*
CALL GCINIT (1, MG600, 2, 30, 11)
*
* DEFINE AND SELECT A TRANSFORMATION THAT DIRECTS OUTPUT
* TO A WINDOW OF (0.0, 1.0) X (-1.0, 1.0)
*
CALL GCNAME (2, 'TITLE01')
CALL GSWN (1, 0.0, 1.0 ,-1.0, 1.0)
CALL GSELNT (1)
*
* BEGIN OUTPUT
*
DO 10 K=1,51
XSINX(K) = FLOAT(K-1)*0.02
XSINY(K) = SIN(XSINX(K)*XTWOPI)
10 CONTINUE
CALL GPL (51, XSINX, XSINY)
*
* ALL DONE WITH GKS, CLOSE THE SYSTEM
*
CALL GPRMPT (1, 'TYPE RETURN', LSTRI, REPLY)
CALL GCSTOP
END
\end{XMP}
\chapter{\protect\label{sec:gtschap}GKSGRAL and GKSGRAL-3D}
\index{GKSGRAL}
\index{GKSGRAL-3D}
\index{FORTRAN binding}
\index{levels of GKS}
GKSGRAL is a full implementation of the ISO GKS standard.
It is written in FORTRAN, and the application interface follows
the final version of the FORTRAN binding \cite{bib-gksftn}.
The kernel is written to the level '2c', although versions on
some systems (and some of the drivers) only support level '2b'.
In particular, the version on IBM is only '2b'.
\index{input!event mode}
\index{input!sample mode}
The main reason not to support '2c' is that Event Mode requires
the operating and communications systems to support asynchronous
interrupts. However, this feature is not required for Sample Mode.
Thus, although it requires 'bending' the standard slightly,
a pseudo Sample Mode is available on some devices which do not conform
to level '2c' using the following procedure:
\begin{XMP}
C Set string mode to sample
CALL GSSTM (WKID,1,1,0)
C Request the locator position
CALL GRQLC (WKID,1,STAT,TNR,PX,PY)
C Sample the character typed
CALL GSMST (WKID,1,1,NCH,STRING)
C Set string mode back to request
CALL GSSTM (WKID,1,0,1)
\end{XMP}
The current release of GKSGRAL-3D, Version 2.0, follows the final version of
the ISO functional standard. It is also very close to the final FORTRAN binding
apart from some minor differences which will be resolved at a future date.
\section{Device Drivers}
\index{device drivers}
\index{MEGATEK}
\index{IBM!5080}
Both GKSGRAL and GKSGRAL-3D use the same 2D device drivers.
Thus, any 2D device supported by GKSGRAL can also be driven from
GKSGRAL-3D. In addition, GKSGRAL-3D supports the IBM~5080,
TEKTRONIX~4235/6, and the MEGATEK WHIZZARD series (models 72xx and 33xx)
which have 3D hardware transformations.
\index{include files}
The list of devices supported may be found in the include file
GTSDEV reproduced in Appendix on Page~\pageref{sec:gtstyp},
although one should check the latest machine-readable version to see
if there have been any changes.
The precise definition of the facilities provided by each workstation
driver, such as the assignment of keys, number of available colours,
and so on, is given in the Workstation Description Tables.
An abridged version of the most common of these may be found in
Appendix on Page~\pageref{sec:wdtref}.
\section{\protect\label{sec:conref}Connection Identifiers}
\index{conid}
\index{connection identifier}
\index{GKS routine!{\protect\tt GOPWK}}
For terminal connections on VAX VMS and VM/CMS, assuming that there are
no special instructions in the Workstation Description Table for the
device, any number in the range from 1 to 90 may be used for the conid
in calls to GOPWK (but see note below).
On VAX VMS, GKSGRAL uses an automatically-generated logical name to connect
to the terminal, but it is possible to override this with the command:
\begin{XMP}
\Ucom{DEFINE GKS_DEVICE_n TTxx:}
\end{XMP}
where \Lit{n} is the conid to be used and \Lit{'TTxx'}
(or \Lit{'TXxx'}, \Lit{'LTxx'}, \Lit{'RTxx'}, \Lit{'NVxx'}, etc.
depending on the type of controller)
is the name of the terminal.
Note that this will only allow use of a terminal other than the one on which
the user is logged in if the user has the requisite privileges,
or if the second terminal is not protected.
Moreover, if the second terminal is not hard-wired, for example, if it is
connected via a Local Area Network terminal server, then one must take care
at the time of use to find out the correct name of the controller and port
to which the physical terminal is connected.
If, for debugging or other purposes, one does not want to get any graphical
output on the terminal (and no graphical input is requested),
then one can connect to the NULL device with the command:
\begin{XMP}
DEFINE GKS_DEVICE_n NL:
\end{XMP}
where n is the conid to be used.
On VMS, it is possible to capture graphics output command sequences from the
HP Plotter, PostScript (see section on Page~\pageref{sec:epsref} for information
on Encapsulated PostScript) and Tektronix 4014 drivers on a file.
To do this, open a file with unit number~=~n and define the
connection identifier to be conid~=~(100~+~n),
where n is a small integer greater than 1.
If there is sufficient interest,
this feature could be extended to other devices.
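For example, to capture PostScript output on FORTRAN unit 9 one might write
something like the following, where the file name is arbitrary, WKID is an
already chosen workstation identifier, and PSTSCR is assumed to be the
PostScript workstation type from the GTSDEV include file:
\begin{XMP}
C conid = 100 + FORTRAN unit number
      OPEN(UNIT=9, FILE='PICTURE.PS', STATUS='NEW')
      CALL GOPWK(WKID, 109, PSTSCR)
      CALL GACWK(WKID)
\end{XMP}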
\begin{note}
\index{FORTRAN Logical Unit Numbers}
At CERN, the GKSGRAL and GKSGRAL-3D packages reserve FORTRAN
Logical Unit Numbers (and hence conids) {\bf 91-93} for internal use.
Apart from these three numbers the actual range of permissible values
for connection identifiers is in fact 1-100.
\end{note}
\section{\protect\label{sec:impref1}Implementation-Dependent Features}
\index{implementation-dependent features}
\index{text fonts}
\index{fonts}
\index{italic text}
\index{proportionally spaced text}
\index{hatch styles}
\index{fill area styles}
\index{fill area limitation}
\index{maximum parameter values}
Although an ISO standard, GKS still includes some features which vary from
implementation to implementation or device to device.
The principal features to watch are text fonts and fill area hatch styles.
GKSGRAL supports 11 stroke-precision Latin text fonts plus Greek
\index{Greek text font}
and a solid filled font.
These are illustrated in \ref{fig:fonts}. The fonts are available
both in italic and non-italic scripts, as well as proportionally and
non-proportionally spaced.
The font indices are given in Appendix on Page~\pageref{sec:wdtref}.
At CERN, GKSGRAL has been modified to provide 24 Fill Area Hatch Styles
(indices -101 to -124), and these should produce the same result on all
workstations (see \ref{fig:hatch}).
Other parameters with which the package has currently been configured are:
\begin{DLtt}{123456}
\item[Workstations]10 simultaneously open workstations (6 on VM)
\item[Segments]2000 segments, names in range 1-32763
\item[Normalization Transformations]21 (0 to 20)
\item[Viewing Transformations]21 (0 to 20)
\item[Fill Area]The number of points in a Fill Area primitive
is limited to 300.
\item[Polyline]If the number of points in a Polyline is larger
than 300 then they will be split into several primitives.
\end{DLtt}
The actual values of these and other parameters
may be inquired at run-time by calling the routines:
\index{GKS routine!{\protect\tt GQWKM}}
\index{GKS routine!{\protect\tt GQMNTN}}
\index{GKS routine!{\protect\tt GQLWK}}
\index{GKS routine!{\protect\tt GQLWK3}}
\index{GKS routine!{\protect\tt GQSGS}}
\index{GKS routine!{\protect\tt GQSGP}}
\begin{XMP}
CALL GQWKM(ERRIND, MXOPWK, MXACWK, MXWKAS)
CALL GQMNTN(ERRIND, MAXTNR)
CALL GQLWK(WTYPE, ERRIND, MPLBTE, MPMBTE, MTXBTE, MFABTE, MPAI, MCOLI)
or
CALL GQLWK3(WTYPE, ERRIND, MPLBTE, MPMBTE, MTXBTE, MFABTE, MPAI,
MEDBTE, MCOLI, MVTE)
CALL GQSGP(WTYPE, ERRIND, NSGP)
\end{XMP}
where the parameters are as follows:
\begin{DLtt}{123456}
\item[WTYPE]workstation type (input parameter)
\item[ERRIND]error number
\item[MXOPWK]maximum number of simultaneously open workstations
\item[MXACWK]maximum number of simultaneously active workstations
\item[MXWKAS]maximum number of workstations associated with a segment
\item[MAXTNR]maximum normalization transformation number
\item[MPLBTE]maximum number of polyline bundle table entries
\item[MPMBTE]maximum number of polymarker bundle table entries
\item[MTXBTE]maximum number of text bundle table entries
\item[MFABTE]maximum number of fill area bundle table entries
\item[MPAI]maximum number of pattern indices
\item[MEDBTE]maximum number of edge bundle table entries
\item[MCOLI]maximum number of colour indices
\item[MVTE]maximum number of view table entries
\item[NSGP]maximum number of segment priorities
\end{DLtt}
There is unfortunately no function provided to inquire the maximum available
number of segments.
\begin{figure}[h]
\begin{verbatim}
picture name=FONTS$$S
\end{verbatim}
\caption{GTS-GRAL italic, proportionally-spaced fonts}
\label{fig:fonts}
\end{figure}
\begin{figure}[h]
\begin{verbatim}
picture name=HATCH$$S
\end{verbatim}
\caption{CERN-defined hatch patterns}
\label{fig:hatch}
\end{figure}
\section{System-Dependent Considerations}
\subsection{IBM}
\index{IBM!GKSGRAL on}
\index{GKSGRAL!on IBM}
\index{HPLOT}
\index{HIGZ}
\index{GKSPACK}
\index{GRAFLIB}
The recommended way of linking GKS is to use the GKS or GRAFLIB parameters
to the {\bf CERNLIB} command to provide automatic access to just
the GKS library, or to both GKS and various higher level graphics
packages which use GKS, notably the CERN Program Library packages
GKSPACK (J551), HPLOT (Y251) and HIGZ (Q120).
Further details on what follows may be obtained via the command
{\bf FIND~CERNLIB}.
\begin{XMP}
CERNLIB GKS
or
CERNLIB GKS3D
or
CERNLIB GRAFLIB (GTS2D [or (GTS3D]
\end{XMP}
The optional parameter '(GTS2D' or '(GTS3D' gives access to the 2D or 3D
GTS-GRAL libraries. In addition, the CERNLIB command
provides access to three versions of each library: {\bf OLD},
{\bf PRO} (default), and {\bf NEW}.
For example, to access the previous library version use:
{\bf 'GRAFLIB!OLD~(GTS2D'}.
Application programs must use VS FORTRAN.
The optional INCLUDE files for the GKS parameters are kept in the data set
{\bf 'GKSINCL~MACLIB'} on the automatically accessed Q-Disk.
The basic commands and tools to use GKS are:
\begin{DLtt}{123456}
\item[CERNLIB GKS]
To access just GKS (or GKS3D for 3D)
\item[CERNLIB GRAFLIB (GTS2D or GTS3D]
To access the GKS and higher level packages
\item[VFORT gksprog]To compile.
\item[LOAD gksprog (START]To execute.
\end{DLtt}
These last two commands may be replaced by the sequence:
\begin{DLtt}{123456}
\item[VFORT gksprog (GO]To compile and go
\end{DLtt}
In this sequence the CERNLIB Exec makes available the VS FORTRAN libraries,
the CERN program library and the GKS library in the correct
order, as well as other associated files.
Any number of user libraries may be specified.
\index{include!on IBM}
To use the GKS include files one must give the command:
\begin{XMP}
GLOBAL MACLIB GKSINCL
\end{XMP}
before compiling the program. Within the code, the files may be included
by using the construct:
\index{include!enum}
\index{include!gtsdev}
\begin{XMP}
INCLUDE (ENUM)
INCLUDE (GTSDEV)
\end{XMP}
where the first file contains the ENUMeration types, and the second
contains the GTS-GRAL DEVice types. No compiler options are necessary.
An alternative method to access the information in the include files
would be via the PATCHY utility by using the KEEP sequences of
GTSDEV and ENUM stored in the PATCH GKSINCL on the GKSPACK Pam file.
To access this Pam file on IBM type GIME~CERNPAMS.
Full information on GKS under VM can be found by using the command
{\bf FIND~GKS} and for the higher level packages via
{\bf FIND~HPLOT}, {\bf FIND~GKSPACK},
{\bf FIND~NAGLIB}, {\bf FIND~HIGZ},
and {\bf FIND~PAW}.
The example programs listed in the Appendix of the
{\it GKS/GKS-3D Primer} are available on the disk accessed via
{\bf GIME~GKS} with the file names:
\index{exref}
\index{execution!on IBM}
\begin{XMP}
GKSEXn FORTRAN (where n = 1, 6)
\end{XMP}
The GKS and GKS-3D libraries released in October 1989 are compatible with
the IBM XA exploitation mode.
\subsection{Use of Segment Storage on IBM}
The CERN GKS installation uses logical unit 91 for the dynamic segment
storage file.
A small file is created on the mini-disk with the largest free writeable
space, dynamically extended as necessary, and deleted when GKS is closed.
Jobs which crash or fail to call GCLKS will leave the file
GKSDYNAM~FT91F001 on a mini-disk and it is best to delete it.
Applications which create a large number of segments may require
up to 5 cylinders of free disk space which can be put on a temporary
disk for example.
\subsection{Debugging on IBM}
\index{Debugging on IBM}
\index{IBM!debugging}
\index{IBM!DIAL (use of)}
Debugging {\it interactive} graphics programs on VM is very difficult,
but by using two terminals it can be done more comfortably, and one can
also use the interactive debugger. The procedure is as follows:
\begin{OL}
\item Login on the alphanumeric terminal as usual, then inform VM that
the graphics output will be on another terminal by the command:
\index{DIAL}
\begin{XMP}
DEF GRAF 019 3270
\end{XMP}
(Where 019 is simply a free address.)
\item Then, on the graphics terminal (which probably has to be connected
through index class 125), connect using the DIAL command:
\begin{XMP}
DIAL userid
\end{XMP}
\end{OL}
Now all alphanumeric i/o, debug commands and FORTRAN run time errors
will be on the alpha terminal and only the graphics i/o on the DIALled
terminal.
\subsection{VAX/VMS}
\index{VAX!GKSGRAL on}
\index{GKSGRAL!on VAX/VMS}
\index{HPLOT}
\index{HIGZ}
\index{GKSPACK}
\index{GRAFLIB}
The recommended way of linking GKS is to use the GKS or GRAFLIB parameters
to the {\bf CERNLIB} command to define the symbol%
\footnote{These commands also define the logical name \Lit{GKS\_ROOT}.}
'\Lit{LIB$}' which provides access to just
the GKS library, or to both GKS and various higher level graphics
packages which use GKS, notably the CERN Program Library packages
GKSPACK (J551), HPLOT (Y251) and HIGZ (Q120).
Further details on what follows may be obtained via the command
\Ucom{HELP CERNLIB}.
\begin{XMP}
CERNLIB GKS
or
CERNLIB GKS3D
or
CERNLIB GRAFLIB/GTS2D [or /GTS3D]
\end{XMP}
The optional parameter '/GTS2D' or '/GTS3D' gives access to the 2D or 3D
versions of the GTS-GRAL package.
In addition, the CERNLIB command provides access to three versions
of the libraries: {\bf OLD}, {\bf PRO} (default), and {\bf NEW}.
For example, to access the previous version of GRAFLIB use the switch
{\bf 'GRAFLIB/OLD'}.
The current default version of GKS is the version from GTS-GRAL,
and so the switch '/GTS2D' may be omitted.
The default situation is to link to shared versions of the libraries.
For non-shared versions it is necessary to add the extra switch
'/NOSH[AREABLE]'.
The basic commands and tools to use GKS are:
\begin{DLtt}{123456}
\item[CERNLIB GKS or GKS3D]
To access just GKS or GKS3D
\item[{CERNLIB GKS/NOSH[AREABLE]}]
To access a non-shared version of GKS or GKS3D
\item[CERNLIB GRAFLIB/GTS2D or /GTS3D]
To access the GKS and higher level packages
\item[FOR gksprog]To compile.
\item[LINK gksprog,'LIB\$']To link.
\item[RUN gksprog.exe]To run.
\end{DLtt}
\index{include!on VAX/VMS}
The include files used when compiling graphics programs which
define the Workstation Types and GKS Parameters (Enumerated Types)
are accessed via the logical names:
\index{include!enum}
\index{include!gtsdev}
\begin{XMP}
INCLUDE 'GKS$GTSDEV'
and
INCLUDE 'GKS$ENUM'
\end{XMP}
and the example programs listed in the appendix of the
{\it GKS/GKS-3D Primer} may be found in:
\index{execution!on VAX}
\begin{XMP}
GKS_ROOT:[DMO]GKSEXn.FOR (n = 1,6)
\end{XMP}
An alternative method to access the information in the include files
would be via the PATCHY utility by using the KEEP sequences of
GTSDEV and ENUM stored in the PATCH~GKSINCL on the GKSPACK Pam file.
On VAX VMS this is stored in:
\begin{XMP}
CERN_ROOT:[PAM]GKSPACK.PAM.
\end{XMP}
\subsection{\protect\label{sec:vstnref}VAXstation features}
\index{VAX!setting window size}
\index{VAXstation}
\index{X-Window}
In the PROduction version (3.2) of GKSGRAL the driver for the VAXStation
uses the UIS interface. To use the X-Window driver (for VAXStations
with DECWindows interface) one has to move to the new version of
GKSGRAL (3.4) which is only available on request.
The UIS driver allows a single process to
open multiple GKS workstations, each corresponding to a new window.
To use this feature it is necessary to call GOPWK once to open each workstation
with a different value in the series of UIS workstation types.
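For example, a minimal sketch might look as follows (the workstation and
connection identifiers, and the workstation types 8601 and 8602, are
illustrative values only; consult the workstation description tables for
those in use at your site):
\begin{XMP}
      CALL GOPKS(6, 0)
*     open two workstations, each in its own window
      CALL GOPWK(1, 1, 8601)
      CALL GOPWK(2, 2, 8602)
      CALL GACWK(1)
      CALL GACWK(2)
*     ... output primitives are sent to all active workstations ...
      CALL GDAWK(2)
      CALL GDAWK(1)
      CALL GCLWK(2)
      CALL GCLWK(1)
      CALL GCLKS
\end{XMP}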
There are also several special features available for users of VAXstations
in order to manipulate the windows:
\begin{UL}
\item The logical name \Lit{GKS_WINDOW} set by the system startup procedure
\Lit{GKSSTART.COM} can point to any directory which contains a copy of
the file WINDOW.DAT. Users can redefine this logical name later
as they wish. However, if a private version of the file WINDOW.DAT
is found in the current working directory, then this one will be used
rather than \Lit{GKS_WINDOW:WINDOW.DAT}.
\item
\index{GKS routine!{\protect\tt GCSDWN}}
The CERN utility routine GCSDWN (Set Display Window) can be used for
VAXstations in order to set inside a program the display window size,
position and title (used by GOPWK), instead of using the values stored
in the file WINDOW.DAT. The function must be called {\bf before}
calling GOPWK. The calling sequence is:
\begin{XMP}
SUBROUTINE GCSDWN (IWTYPE, RWIN, TITLE)
INTEGER IWTYPE
REAL RWIN(4)
CHARACTER*(*) TITLE
\end{XMP}
Where IWTYPE is the workstation type (8601, 8602, etc...),
RWIN is the window size and position, and TITLE contains the name given
to the window by the display manager.
\item
\index{GKS routine!{\protect\tt GCCDWN}}
The CERN utility routine GCCDWN (Change Display Window) can be used for
VAXstations in order to change interactively inside a program the display
window size and position after the window has been created.
The calling sequence is:
\begin{XMP}
SUBROUTINE GCCDWN (WKID, RWIN, CODE)
INTEGER WKID
REAL RWIN(4)
INTEGER CODE
\end{XMP}
Where WKID is the workstation identifier, RWIN is the window size and
position, and CODE takes either the value '1' or '2'.
If CODE=1, then any window size can be defined but the workstation viewport
is not changed, which may change the aspect ratio.
If CODE=2, then there is a restriction placed on the window size that can be
chosen to ensure that the aspect ratio of the window contents remains
unchanged. The function uses the largest window that can fit into the size
the user has chosen and that has the same height/width ratio as the initial
window. The workstation viewport is automatically resized and the
workstation updated.
\end{UL}
For both GCSDWN and GCCDWN, the window size and position have to be given in
the same order as in the file WINDOW.DAT:
\begin{XMP}
RWIN(1) = window size in X in metres
RWIN(2) = window size in Y in metres
RWIN(3) = window position X on screen in metres
RWIN(4) = window position Y on screen in metres
\end{XMP}
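As an illustration, the following sketch opens a workstation in a
20~cm by 15~cm window near the lower left corner of the screen
(all numerical values, the window title and the workstation type 8601
are illustrative only):
\begin{XMP}
      REAL RWIN(4)
*     window size (X,Y) and position (X,Y) in metres
      DATA RWIN / 0.20, 0.15, 0.05, 0.05 /
      CALL GCSDWN(8601, RWIN, 'MY WINDOW')
      CALL GOPWK(1, 1, 8601)
\end{XMP}
As stated above, the call to GCSDWN must precede the call to GOPWK.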
\subsection{\protect\label{sec:unixlib}UNIX}
\index{Unix!GKSGRAL on}
\index{GKSGRAL!on UNIX}
\begin{note}
{\it File names and directory paths under UNIX}
\footnote{UNIX is a trade mark of AT\&T}
{\it are case sensitive.}
\end{note}
Since October 1989 the CERN GTS-GRAL licence has been extended to include
general use of the company's software on any UNIX platform, including
UNICOS on the CRAY. In addition, from July, 1990, GKSGRAL on Apollo
platforms is supported under UNIX, rather than Aegis. However, affiliated
institutes with a UNIX licence will still need a licence specifically for
Apollo if they wish to receive the Apollo screen drivers.
As the UNIX system runs on many hardware platforms, only a few of which
are available at CERN, it may not be possible for the CERN Program Library
to distribute binary libraries for the particular machine a user requires.
Thus, UNIX users may have to install the software from
\index{Unix!tar file}
\index{tar file}
a TAR file as explained in Appendix on Page~\pageref{sec:unixdis}.
To use GKSGRAL or GKSGRAL-3D one requires access
to the libraries, font files and include files.
These should be made available via environment variables provided
by the system manager. For example, if the C shell is being used,
the following should be included in the .login file:
\begin{XMP}
setenv gkshome /user/gts-gral_root_directory
setenv gkslib $gkshome/gks/libs/gkslib.a
setenv gksdriv $gkshome/gks/libs/gksdriv.a
setenv gks3dlib $gkshome/gks3d/libs/gks3d.a
setenv GKS_FONTS $gkshome/gks/fonts
\end{XMP}
Thus, all variables are defined in terms of \Lit{$gkshome}, which should
be set to point to the local root directory.
Alternatively, one can store the files in a way analogous
to that used for the CERN Program Library, in which case the
environment variables should be set to:
\begin{XMP}
setenv gkslib /cern/gks/pro/lib/gkslib.a
setenv gksdriv /cern/gks/pro/lib/gksdriv.a
setenv gks3dlib /cern/gks/pro/lib/gks3dlib.a
setenv GKS_FONTS /cern/gks/pro/dat
\end{XMP}
Environment variables may be set under the Bourne or Aegis shells
as in the example:
\begin{XMP}
GKS_FONTS=/cern/gks/pro/dat; export GKS_FONTS #Bourne
or
GKS_FONTS := '/cern/gks/pro/dat'; export GKS_FONTS #Aegis.
\end{XMP}
To drive terminals attached via RS232 terminal lines, it is necessary
also to make a logical connection between the Connection Identifier
specified in the call to Open Workstation and the device.
GKSGRAL communicates with a set of devices defined by environment
variables {\it gksch01} to {\it gksch18}, where the numbers
1 to 18 correspond to the connection identifier. Thus, if the
connection identifier is set to '1', and one wishes to perform graphics
on the same terminal used for alpha-numeric commands, then under
the C shell one would need to type:
\begin{XMP}
setenv gksch01 /dev/tty
\end{XMP}
To compile and link a Fortran program 'myprog.f' use the command:
\begin{XMP}
f77 myprog.f $gkslib $gksdriv -o myprog.exe
\end{XMP}
In this case the f77 command is used both to compile and link the
program. One can also compile and link the program separately.
However, even in this case one should probably use the f77 command to
link, rather than ld. For example, on DECstations the f77 command
automatically includes all the system libraries, whilst ld does not.
\begin{XMP}
f77 -c myprog.f -o myprog.o
\end{XMP}
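The corresponding link step might then be, for example:
\begin{XMP}
f77 myprog.o $gkslib $gksdriv -o myprog.exe
\end{XMP}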
\begin{note}
In general under UNIX the linker searches libraries in order.
Thus, a reference from a library at the end of the sequence to a routine
stored in an earlier library will result in an unresolved external
reference. This is the case for GKS-3D. A solution is to specify
the earlier library twice:
\begin{XMP}
f77 myprog.o $gks3dlib $gksdriv $gks3dlib -o myprog.exe
\end{XMP}
\end{note}
\index{include!on UNIX}
The include files used when compiling graphics programs which
define the Workstation Types and GKS Parameters (Enumerated Types)
are to be found in the directory \Lit{$gkshome/utl}
and/or, on systems which
follow the CERN library conventions, /cern/gks/pro/utl.
They may be accessed from a FORTRAN program as follows:
\index{include!enum}
\index{include!gtsdev}
\begin{XMP}
INCLUDE '/cern/gks/pro/utl/gks_gtsdev'
and
INCLUDE '/cern/gks/pro/utl/gks_enum'
\end{XMP}
At run time GKS reads in the files defining the software fonts.
These are accessed via an environment variable \Lit{GKS_FONTS} which should be
defined either by the system or the user login procedure.
\index{execution!on UNIX}
The example programs listed in the appendix of the
{\it GKS/GKS-3D Primer} are available either in \Lit{$gkshome/dmo}
or in \Lit{/cern/gks/pro/dmo}, with the names gksexN.f (N=1 to 6).
However, one may need to edit them in order to set the desired workstation
type and to use the correct paths to the include files.
On some systems the top directory /cern may not exist, in which case
one should use the environment variable \Lit{$CERN} to point to the root
directory.
\begin{note}
{\it
File names and directory paths under UNIX are case sensitive;
\Lit{$CERN} is not equivalent to \Lit{$cern}!}
\end{note}
A selection of help files, including this one, are to be found
in \Lit{$gkshome/doc}.
\subsection{APOLLO}
\index{Apollo!GKSGRAL on}
\index{GKSGRAL!on APOLLO}
\index{Apollo!GPR}
\index{Apollo!GSR}
From July 1990 onwards only Aegis SR~10 and later system releases
will be supported. Aegis SR~9.7 library versions are obtainable,
but will not be updated. Aegis SR~10 is compatible with UNIX,
and so simplifies support. However, this affects file formats and access control,
as well as the case sensitivity of file names. Although earlier versions of AEGIS
were insensitive to the case of file names, this is no longer true from
SR~10.0 onwards. Thus, for example, the program statement:
\begin{XMP}
INCLUDE '/CERN/GKS/PRO/UTL/GKS_GTSDEV'
\end{XMP}
will not work with SR~10.0. The path name within quotes
{\bf must be in lower case}.
Other points to watch for are that a '/' character is used after the tilde
when accessing files via the naming directory,
and that when using UNIX shells every command creates a new process, so care
must be taken that scripts used to set environment variables are run in the
context of the current process, for example by typing \Ucom{source my\_script}
under the C~shell, or \Ucom{. my\_script} under the Bourne shell.
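As an illustration, a small script of the form (contents purely illustrative):
\begin{XMP}
# my_script  (C shell syntax)
setenv GKS_FONTS /cern/gks/pro/dat
\end{XMP}
sets the variable in the current shell only when it is sourced as shown above;
running it as an ordinary command would set it in a child process and have no
lasting effect.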
Apart from the Appendix E metafile and PostScript drivers, the standard
CERN APOLLO libraries include drivers for both the GPR and GSR graphics
interfaces. For machines such as the DN590, with GSR hardware support,
the GSR driver will produce improved performance. This driver also
allows a single process to open multiple GKS workstations, each
corresponding to a new APOLLO window. The next release of this driver
is scheduled to support level C input, as well as a display surface size
which may be changed dynamically. To use multiple GSR windows from a single
graphics process it is necessary to call GOPWK once to open each workstation
with a different value in the series of GSR workstation types.
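As in the VAXstation case described earlier, a minimal sketch (connection
identifiers are illustrative; the workstation types are taken from the GSR
range listed in the section on Apollo Workstation Types below):
\begin{XMP}
      CALL GOPWK(1, 1, 9701)
      CALL GOPWK(2, 2, 9702)
      CALL GACWK(1)
      CALL GACWK(2)
\end{XMP}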
At run time GKS reads in the files defining the software fonts (which are
used by all drivers). These are accessed via an environment variable
\Lit{'GKS_FONTS'} which may be created by a command in the
startup or \Lit{.login} files.
At CERN, this environment variable points to the cernlib directory
\Lit{/cern/gks/pro/dat} (or \Lit{.../new/dat} or \Lit{.../old/dat},
depending on the version
required). The fonts are distributed in the directory
\Lit{$gkshome/gks/fonts}.
(In addition to the software fonts used by all drivers, the GSR driver
can use also the hardware fonts described below.)
For use of the GPR interface, the user's home directory should contain a
file (or link) called \Lit{gks_characteristic} which contains set-up
information describing the display. An example of this file may be found in:
\begin{XMP}
/cern/gks/pro/dat/gks_characteristic
\end{XMP}
If the user does not have a link from the naming directory to a
private copy of \Lit{gks_characteristic}, then GKSGRAL will attempt to read a
default version which should be made available by creating the
following link:
\begin{XMP}
crl /dev/gks_characteristic @
/cern/gks/pro/dat/gks_characteristic
\end{XMP}
(As /dev is protected, this must be done from a privileged account.)
A private copy of the file may be edited to give the
desired window size. As an example, the Workstation Type 10002
corresponds to the second line of the file.
The standard version of this file is distributed in
\Lit{$gkshome/gks/drivers/adgpr}.
The GSR driver requires access to two configuration files, one called
\Lit{gks_workstations.config} which is similar to the GPR
\Lit{gks_characteristic} file containing set-up information describing
the display, and one called \Lit{gks_fonts.config} which lists
the available hardware fonts.
Copies of these files, modified as necessary,
may be stored in (or pointed to by links from) the user's home directory,
or default versions should be made available by creating the links:
\begin{XMP}
crl /sys/node_data/gks_workstations.config @
/cern/gks/pro/dat/gks_workstations.config
and
crl /sys/node_data/gks_fonts.config @
/cern/gks/pro/dat/gks_fonts.config
\end{XMP}
The standard versions of these files are distributed in
\Lit{$gkshome/gks/drivers/adgsr}.
As for other UNIX machines, the libraries are stored either in
\Lit{$gkshome/gks/libs} or /cern/gks/pro/lib, and it is easiest to access
them via environment variables:
\begin{XMP}
setenv gkslib /cern/gks/pro/lib/gkslib_3000.a
setenv gksdriv /cern/gks/pro/lib/gksdriv_3000.a
setenv gks3dlib /cern/gks/pro/lib/gks3d_3000.a
\end{XMP}
Where the '3000' refers to the compiler option used to produce executable modules
targeted at the DN3000 style machines with a Motorola 68020/30/40. One
can replace '3000' by '10000' for the DN10000 library versions.
To compile and link directly to the libraries use:
\begin{XMP}
/com/ftn myprog.ftn -indexl
/com/bind myprog.bin $gkslib $gksdriv -b myprog.exe
\end{XMP}
where the parameter '-indexl' is optional but recommended.
The parameter -save may also be used for safety if variables which must be
available on re-entry to a routine have not been stored in a COMMON BLOCK.
However, use of -save is very detrimental to code optimization,
especially on the DN10000. The Aegis versions of the Fortran compiler
must be used to ensure that the external global symbols use the same
naming conventions as for the libraries (i.e. lower case names with
no trailing underscore).
\index{include!on Apollo}
The include files used when compiling graphics programs which
define the Workstation Types and GKS Parameters (Enumerated Types)
are to be found in the directory '/cern/gks/pro/utl'.
(Distributed in \Lit{$gkshome/utl}.)
They may be accessed from a Fortran program as follows:
\index{include!enum}
\index{include!gtsdev}
\begin{XMP}
INCLUDE '/cern/gks/pro/utl/gks_gtsdev'
and
INCLUDE '/cern/gks/pro/utl/gks_enum'
\end{XMP}
(Under SR~10.2 the Fortran compiler accepts VMS syntax for the
INCLUDE statement.)
\index{Apollo!inlib}
As linking to INLIB (shared) versions of the libraries takes up much less
space, and is much faster, INLIB libraries are defined as follows:
\begin{XMP}
setenv gkslib_inlib /cern/gks/pro/lib/gkslib_3000.inlib
setenv gks3dlib_inlib /cern/gks/pro/lib/gks3d_3000.inlib
\end{XMP}
These may be linked using the command:
\begin{XMP}
/com/bind myprog.o -b myprog.exe -inlib $gkslib_inlib
or
/com/bind myprog.o -b myprog.exe -inlib $gks3dlib_inlib
\end{XMP}
\index{execution!on APOLLO}
The example programs listed in the appendix of the
{\it GKS/GKS-3D Primer} may be found in:
\begin{XMP}
/cern/gks/pro/dmo/gksexN.ftn (N = 1,6)
\end{XMP}
They are distributed in \Lit{$gkshome/dmo}.
\subsubsection{APOLLO Models}
APOLLO nodes exist with various CPU options. The latest machine, the DN10000,
will only work with libraries compiled specifically for this hardware.
All other recent APOLLO models use the standard M68020 instruction set,
on which can be run libraries compiled with the '3000' option.
Note that CERN no longer produces libraries compiled with the 'any' switch.
\subsubsection{Apollo Workstation Types}
The following consists of a list of the workstation types for
the GTS-GRAL workstation drivers installed on Apollo.
The full list may be consulted in \Lit{gks_gtsdev}, or in
Appendix on Page~\pageref{sec:gtstyp}.
\begin{DLtt}{123456}
\item[3]WISS
\item[4]Metafile Output
\item[5]Metafile Input
\item[9701-9708]Apollo GSR interface
\item[10002]Apollo DN300, DN3000, Monochrome (GPR interface)
\item[10003]Apollo DN550, DN660, Colour (GPR interface)
\item[10004]Apollo DN3000/4000, Colour (GPR interface)
\item[12201]Postscript colour portrait
\item[12202]Postscript colour landscape
\item[12203]Postscript monochrome portrait
\item[12204]Postscript monochrome landscape
\item[10201]2D-Metafile for 3D GKS
\end{DLtt}
\subsection{CRAY under UNICOS}
\index{Cray!GKSGRAL on}
\index{GKSGRAL!on CRAY}
\index{Unix!on CRAY}
As the CRAY is used as a batch production service,
only the WISS and metafile workstations have been made available in the
GKSGRAL library, which should be accessed via the 'cernlib' command.
Detailed information on use of the 'cernlib' command for the CRAY is
available under VM/CMS by typing: {\bf FIND~CRAY~CERNLIB}.
However, for those users wishing simply to make use of GKS,
then the commands to type are:
\begin{XMP}
cernlib gks
or
cernlib gks3d
\end{XMP}
which create a file \Lit{'LIB$'} in the user's working directory
which contains the required libraries (the case matters).
The cernlib command may take the switches {\bf -o} and {\bf -n}
to allow the selection of {\bf o}ld or {\bf n}ew library versions.
To compile and link the FORTRAN program 'myprog.ftn' use the commands:
\begin{XMP}
cft77 myprog.ftn
segldr -o myprog.exe myprog.o LIB$
\end{XMP}
At run time GKS reads in the files defining the software fonts.
These are accessed via an environment variable \Lit{GKS_FONTS} which is
defined by the system login procedures.
\index{include!on CRAY}
The include files used when compiling graphics programs which
define the Workstation Types and GKS Parameters (Enumerated Types)
are to be found in the directory '/cern/gks/pro/utl'.
They may be accessed from a FORTRAN program as follows:
\index{include!enum}
\index{include!gtsdev}
\begin{XMP}
INCLUDE '/cern/gks/pro/utl/gks_gtsdev'
and
INCLUDE '/cern/gks/pro/utl/gks_enum'
\end{XMP}
The example programs listed in the appendix of the
{\it GKS/GKS-3D Primer} are not available on the CRAY.
\section{\protect\label{sec:decgks}DECGKS: Coexistence with GKSGRAL and Implementation}
One of the advantages of using an ISO standard graphics package
is that users are not limited to the products of a single supplier.
Thus, although the principal GKS implementation supported at CERN
is that of GTS-GRAL, users of Digital Equipment Corporation (DEC)
machines may wish to use the DEC implementation of GKS.
This might be because of the availability of drivers, or because
the performance of DEC software on the company's own machines is
likely to be better than that of third party software suppliers
who are constrained to ensure that their libraries operate in
many different environments.
Whilst there are no major problems in moving between DECGKS and GKSGRAL
there are several implementation dependencies, and these are documented
below. A large number of routines have been added to the set of tools
in GKSPACK (see section on Page~\pageref{sec:gkspref}).
Some of these have been written in order to aid the portability of applications
between different GKS implementations by supplying information
about the Workstation Types and Connection Identifiers of a particular
implementation. Other routines have been provided to emulate extensions of
GKS available in the GKSGRAL implementation. Whilst users of GKSGRAL
will have these routines available in the GKSGRAL library,
users of DECGKS will need to link to an additional library
containing a version of GKSPACK tailored for the DECGKS implementation.
This library is called \Lit{GKSPACK_DEC.OLB}, and on the CERN VAX cluster
may be found in:
\begin{XMP}
GKS_ROOT:[LIB]GKSPACK_DEC.OLB
\end{XMP}
Implementation Dependencies:
\begin{UL}
\item Workstation Types and Connection Ids
Implementations are free to choose whichever Workstation Types and
Connection Identifiers they wish. Thus, those in use by GKSGRAL and
DECGKS do not match. The routines in the library GKSPACK go some way to
alleviating this problem (see section on Page~\pageref{sec:gkspref}).
\item Fonts and Attributes
Neither GKS, nor any other Graphics Standard, defines the shapes of
the characters corresponding to a particular font number. In addition,
a particular implementation may provide access to hardware fonts
on some devices. This also applies to hatch styles and patterns, etc.
The fonts and hatch styles available from GKSGRAL are defined
on Page~\pageref{sec:impref1}, and a brief comparison of the two implementations
follows:
\begin{XMP}
GTSGRAL                         |  DECGKS
\end{XMP}
Hardware Fonts:
\begin{XMP}
See wk descr. tables            |  DECWINDOWS : -101 to -113
                                |  UIS        : -200 to -202
\end{XMP}
Software Fonts:
\begin{XMP}
-1 to -11: normal, proport.     |  font 1 = font -1 = DEC GKS
                                |    multinational font
-13 : greek                     |
-51 : solid filled font         |
same font numbers - 100:        |  -2 to -23: Hershey fonts
  idem but italics              |
same font numbers - 200:        |
  idem but monospaced           |
same font numbers - 300:        |
  idem but italics monospaced   |
\end{XMP}
Line types:
\begin{XMP}
                                |  -1 to -8 DEC specific
\end{XMP}
Marker Types:
\begin{XMP}
-101 to -114 GKSGRAL specific   |  -1 to -13 DEC specific
\end{XMP}
Fill Area Hatch Styles:
\begin{XMP}
-101 to -124 (CERN specific)    |  -1 to -33 (UIS specific)
                                |  -1 to -9 (DECwindows specific)
\end{XMP}
Fill Area Patterns:
\begin{XMP}
None                            |  1 to 196 (UIS specific)
                                |  1 to 28 (DECwindows specific)
                                |  -29 to -58 (DECwindows specific)
\end{XMP}
\item Data Records
Both the contents and internal format of data records used by
GKSGRAL and DECGKS are different. The format should not affect the majority
of users, who do not need to access data records explicitly.
However, users will be affected by the differences in data record
contents if they make use of facilities to initialize input devices
or use GDPs.
To help solve this problem, higher-level routines have been provided
by GTS-GRAL which hide details of the data record contents.
These include GUARC, GUBEZ1, GUCIR1, GUCIR2, GUCUR1, GUELL1, GUELL2,
and GUMEN2. The library \Lit{GKSPACK_DEC.OLB}, described in section
on Page~\pageref{sec:gkspref}, contains
emulations of these routines which work with DECGKS.
\item Metafiles
Whilst the contents of the GTS-GRAL and DECGKS metafiles are logically
the same, the file formats are not. In order that the CERN metafile
utility programs GRVIEW and GRPLOT may be used with metafiles produced
with DECGKS, an option will be introduced into GRCONV to convert them
to the same format as those written by GTS-GRAL (but not vice versa).
Until this feature is installed, anyone wishing to convert a DECGKS
metafile should contact the UCO.
\item Input
Whilst stroke input requires a trigger for each locator position in the
GTS-GRAL GKS implementation, that of DEC does not, but simply samples
the locator position at fixed time or distance intervals.
Thus, GTS-GRAL's stroke input is more or less equivalent to
calling Request Locator in a loop.
In order to provide functionality when using DECGKS equivalent to that
in GKSGRAL, a CERN-written version of GRQSK may be found in the library
\Lit{GKSPACK_DEC.OLB}.
The DECGKS implementation uses separate windows for messages and
also for string, choice, and valuator input.
The window size depends on the echo area specified in GINST, GINCH,
and GINVL.
\end{UL}
| {
"alphanum_fraction": 0.77297418,
"avg_line_length": 42.062871382,
"ext": "tex",
"hexsha": "f26312715156ec438126c30a98a6747b947728f5",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "berghaus/cernlib-docs",
"max_forks_repo_path": "gks/gksch1.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "berghaus/cernlib-docs",
"max_issues_repo_path": "gks/gksch1.tex",
"max_line_length": 88,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "berghaus/cernlib-docs",
"max_stars_repo_path": "gks/gksch1.tex",
"max_stars_repo_stars_event_max_datetime": "2019-07-24T12:30:01.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-07-24T12:30:01.000Z",
"num_tokens": 58225,
"size": 219442
} |
\chapter{Names of FORTRAN-77 files}
\index{FORTRAN files}
\begin{table}
\begin{center}
\begin{tabular}{llllllll}
\comp{aababc} & \comp{cnvg} & \comp{diat} & \comp{formxy} & \comp{kab} & \comp{mulliz} & \comp{react1} & \comp{symopr} \\
\comp{aabacd} & \comp{cnvgz} & \comp{diat2} & \comp{forsav} & \comp{lapack} & \comp{mult} & \comp{reada} & \comp{symp} \\
\comp{aabbcd} & \comp{coe} & \comp{digit} & \comp{frame} & \comp{lewis} & \comp{mult33} & \comp{readmo} & \comp{symr} \\
\comp{addhb} & \comp{commop} & \comp{dihed} & \comp{freqcy} & \comp{ligand} & \comp{mxm} & \comp{refer} & \comp{symt} \\
\comp{adjvec} & \comp{commoz} & \comp{dijkl1} & \comp{genun} & \comp{linmin} & \comp{mxmt} & \comp{reorth} & \comp{symtry} \\
\comp{aintgs} & \comp{compct} & \comp{dijkl2} & \comp{geochk} & \comp{local} & \comp{myword} & \comp{repp} & \comp{symtrz} \\
\comp{am1d} & \comp{compfg} & \comp{dipind} & \comp{geout} & \comp{local2} & \comp{names} & \comp{reseq} & \comp{tables} \\
\comp{analyt} & \comp{copy1} & \comp{dipole} & \comp{geoutg} & \comp{localz} & \comp{newflg} & \comp{reset} & \comp{thermo} \\
\comp{anavib} & \comp{copy2} & \comp{dipolz} & \comp{getdat} & \comp{locmin} & \comp{newmat} & \comp{resolv} & \comp{tidy} \\
\comp{atomrs} & \comp{cosmo} & \comp{dofs} & \comp{getgeg} & \comp{lyse} & \comp{nllsq} & \comp{rmopac} & \comp{timer} \\
\comp{axis} & \comp{cross} & \comp{dot} & \comp{getgeo} & \comp{makopr} & \comp{nuchar} & \comp{rotatd} & \comp{timout} \\
\comp{babbbc} & \comp{dang} & \comp{drc} & \comp{getpdb} & \comp{maksym} & \comp{nxtmer} & \comp{rotate} & \comp{tmpi} \\
\comp{babbcd} & \comp{datin} & \comp{drcout} & \comp{getsym} & \comp{makvec} & \comp{optbr} & \comp{rotlmo} & \comp{tmpmr} \\
\comp{bangle} & \comp{dcart} & \comp{dtran2} & \comp{gettxt} & \comp{mamult} & \comp{orient} & \comp{rotmol} & \comp{tmpzr} \\
\comp{bfn} & \comp{delsta} & \comp{dtrans} & \comp{getval} & \comp{mat33} & \comp{osinv} & \comp{rsp} & \comp{tom} \\
\comp{bintgs} & \comp{denrot} & \comp{ef} & \comp{gmetry} & \comp{matou1} & \comp{outer1} & \comp{scfcri} & \comp{txtype} \\
\comp{blas} & \comp{denroz} & \comp{eigen} & \comp{gover} & \comp{matout} & \comp{outer2} & \comp{schmib} & \comp{upcase} \\
\comp{bldsym} & \comp{densit} & \comp{eimp} & \comp{greek} & \comp{mbonds} & \comp{parsav} & \comp{schmit} & \comp{update} \\
\comp{block} & \comp{densiz} & \comp{empiri} & \comp{greenf} & \comp{meci} & \comp{partxy} & \comp{search} & \comp{values} \\
\comp{bonds} & \comp{deri0} & \comp{encoding} & \comp{grid} & \comp{mecid} & \comp{pathk} & \comp{second} & \comp{vecprt} \\
\comp{bondsz} & \comp{deri1} & \comp{enpart} & \comp{h1elec} & \comp{mecih} & \comp{paths} & \comp{selmos} & \comp{vecprz} \\
\comp{brlzon} & \comp{deri2} & \comp{epseta} & \comp{h1elez} & \comp{mecip} & \comp{pdbout} & \comp{set} & \comp{volume} \\
\comp{buildf} & \comp{deri21} & \comp{errion} & \comp{haddon} & \comp{minv} & \comp{perm} & \comp{setupg} & \comp{wallc} \\
\comp{calpar} & \comp{deri22} & \comp{esp} & \comp{hbonds} & \comp{mlmo} & \comp{picopt} & \comp{setupi} & \comp{writmn} \\
\comp{canon} & \comp{deri23} & \comp{exchng} & \comp{hcore} & \comp{mndod} & \comp{pinout} & \comp{setupk} & \comp{writmo} \\
\comp{capcor} & \comp{deritr} & \comp{ffhpol} & \comp{hcorz} & \comp{modchg} & \comp{plato} & \comp{setupr} & \comp{wrtkey} \\
\comp{cartab} & \comp{deriv} & \comp{fillij} & \comp{helect} & \comp{modgra} & \comp{pmep} & \comp{solrot} & \comp{wrttxt} \\
\comp{cdiag} & \comp{dernvo} & \comp{findn1} & \comp{helecz} & \comp{moldat} & \comp{point} & \comp{sort} & \comp{xxx} \\
\comp{charmo} & \comp{dfield} & \comp{finish} & \comp{hybrid} & \comp{molsym} & \comp{polar} & \comp{spline} & \comp{xyzcry} \\
\comp{charst} & \comp{dfock2} & \comp{flepo} & \comp{ijkl} & \comp{molval} & \comp{polarz} & \comp{ss} & \comp{xyzgeo} \\
\comp{charvi} & \comp{dfpsav} & \comp{flushm} & \comp{interp} & \comp{mopac} & \comp{powsav} & \comp{state} & \comp{xyzint} \\
\comp{check} & \comp{dhc} & \comp{fmat} & \comp{intfc} & \comp{mopend} & \comp{powsq} & \comp{supdot} \\
\comp{chi} & \comp{dhcore} & \comp{fock1} & \comp{ionout} & \comp{mpcbds} & \comp{prtdrc} & \comp{superd} \\
\comp{chkion} & \comp{diag} & \comp{fock1z} & \comp{isitsc} & \comp{mpcpop} & \comp{prtgra} & \comp{swap} \\
\comp{chklew} & \comp{diagg} & \comp{fock2} & \comp{iter} & \comp{mpcsyb} & \comp{prtlmo} & \comp{switch} \\
\comp{chrge} & \comp{diagg1} & \comp{fock2z} & \comp{iterz} & \comp{mtxm} & \comp{prttim} & \comp{symdec} \\
\comp{chrgez} & \comp{diagg2} & \comp{fockd2} & \comp{jab} & \comp{mtxmc} & \comp{pulay} & \comp{symh} \\
\comp{ciosci} & \comp{diagi} & \comp{force} & \comp{jcarin} & \comp{mullik} & \comp{quadr} & \comp{symoir}
\end{tabular}
\end{center}
\end{table}
| {
"alphanum_fraction": 0.6034053156,
"avg_line_length": 102.4680851064,
"ext": "tex",
"hexsha": "54d356210519cbfae19577fbd16f6a2ea95205e7",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "01510e44246de34a991529297a10bcf831336038",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "openmopac/MOPAC-archive",
"max_forks_repo_path": "manuals/MOPAC2000_manual/names_of_files.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "01510e44246de34a991529297a10bcf831336038",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "openmopac/MOPAC-archive",
"max_issues_repo_path": "manuals/MOPAC2000_manual/names_of_files.tex",
"max_line_length": 128,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "01510e44246de34a991529297a10bcf831336038",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "openmopac/MOPAC-archive",
"max_stars_repo_path": "manuals/MOPAC2000_manual/names_of_files.tex",
"max_stars_repo_stars_event_max_datetime": "2021-12-16T20:54:11.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-12-16T20:53:27.000Z",
"num_tokens": 2215,
"size": 4816
} |
% --------------------------- %
% BDEQueueTest_Revisited Start
% --------------------------- %
\section{\textbf{BDEQueueTest Test}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Particular Case}
\par
The problem we want to solve in this exercise is how to perform work
distribution among threads in an effective way.
\par
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Solution}
\par
The solution proposed in this exercise is to use Work-Stealing Dequeues.
Specifically, we will use a bounded version.
\par
The idea is that we will have a pool of tasks and each thread has a queue of
work to be done. It is possible that some threads finish the jobs in their
queues faster than others. In such a situation, this algorithm allows faster
threads to steal jobs from other threads' queues.
\par
The implementation of this algorithm requires a DEQueue (Double Ended Queue).
When picking a job from a queue, we distinguish two cases. The first one, which
is the most common one, is that one thread picks a job from its own queue. In
that case, the thread uses the \textit{popBottom()} method.
\par
The other case is when a thread steals a job from another queue. In that case,
the thread uses the \textit{popTop()} method.
\par
Let us take a look at the interesting methods.
\par
\textit{popBottom()} distinguishes between two cases. If there is a conflict
between a \textit{popBottom()} and \textit{popTop()}, then it resets the
bookmark of the top of the queue using a \textit{compareAndSet()}. If it
succeeds, then it means that our method won and it returns the element.
Otherwise it means that the stealer won, and we have to return null.
\par
If there is no conflict between the two pop methods, then we simply return the
element. There is no need to call a \textit{compareAndSet()}.
\par
\hfill
\begin{lstlisting}[style=numbers]
Runnable popBottom() {
// is the queue empty?
if (bottom == 0) // empty `\label{line:steal:empty}`
return null;
bottom--;
// bottom is volatile to assure all reads beyond this line see it
Runnable r = tasks[bottom];
int[] stamp = new int[1];
int oldTop = top.get(stamp), newTop = 0;
int oldStamp = stamp[0], newStamp = oldStamp + 1;
// no conflict with thieves `\label{line:steal:noconflict}`
if (bottom > oldTop)
return r;
// possible conflict: try to pop it ourselves
if (bottom == oldTop) {
// even if stolen by other, queue will be empty, reset bottom
bottom = 0;
if (top.compareAndSet(oldTop, newTop, oldStamp, newStamp))
return r;
}
return null;
}
\end{lstlisting}
\hfill
\par
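For comparison, here is a sketch of what the thief's \textit{popTop()} might
look like. It follows the same bounded-dequeue idea as the listing above; the
exact code in the class under test may differ in detail.
\par
\hfill
\begin{lstlisting}[style=numbers]
Runnable popTop() {
  int[] stamp = new int[1];
  int oldTop = top.get(stamp), newTop = oldTop + 1;
  int oldStamp = stamp[0], newStamp = oldStamp + 1;
  // is the queue empty?
  if (bottom <= oldTop)
    return null;
  Runnable r = tasks[oldTop];
  // try to advance top; if the compareAndSet fails, the owner or
  // another thief got there first and we simply give up
  if (top.compareAndSet(oldTop, newTop, oldStamp, newStamp))
    return r;
  return null;
}
\end{lstlisting}
\hfill
\par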
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Experiment Description}
\par
Two test cases are provided with this program:
\begin{itemize}
\item testSequential. 16 threads are spawned. Each thread pushes a value into our
DEQueue. After that, even threads call \textit{popTop()} and odd threads call
\textit{popBottom()}. Depending on the popped value, an associated slot in an
array is marked as true. The test checks that no slot in the array is marked
true twice, since that would mean that a conflict between the pop methods was
not resolved correctly.
\item testConcurrent. Again, 16 threads are spawned and each thread first pushes
a value into our DEQueue. After that, all threads are in competition to pop the
values. The idea is that some threads will call \textit{popTop()} to steal from
other's queues. However, at the end we should still see the same invariant as in
the previous test case.
\end{itemize}
\par
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Sample Results and Interpretation}
\par
The result of the execution of the test cases was as follows:
\par
\hfill
\begin{verbatim}
[oraadm@gdlaa008 Steal]$ junit steal.BDEQueueTest
.sequential pushBottom and popBottom
.concurrent pushBottom and popBottom
Time: 0.055
OK (2 tests)
\end{verbatim}
\hfill
\par
The tests passed every time.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% --------------------------- %
% BDEQueueTest_Revisited End
% --------------------------- %
| {
"alphanum_fraction": 0.6910930175,
"avg_line_length": 37.5277777778,
"ext": "tex",
"hexsha": "59091414e2bcbeccd6ab28708e824a6ed0686b51",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f27d4dd6f44172bb6c910552e50107838d653f2f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "rzavalet/multiprocessor",
"max_forks_repo_path": "Report/sections/BDEQueueTest_Revisited.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f27d4dd6f44172bb6c910552e50107838d653f2f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "rzavalet/multiprocessor",
"max_issues_repo_path": "Report/sections/BDEQueueTest_Revisited.tex",
"max_line_length": 80,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f27d4dd6f44172bb6c910552e50107838d653f2f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "rzavalet/multiprocessor",
"max_stars_repo_path": "Report/sections/BDEQueueTest_Revisited.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1007,
"size": 4053
} |
\documentclass[a4paper, 10pt, twoside, headings=small]{scrartcl}
\input{../options.tex}
\setmainlanguage[]{english}
\title{11 Longing for More}
\author{Ellen G.\ White}
\date{2021/03 Rest in Christ}
\begin{document}
\maketitle
\thispagestyle{empty}
\pagestyle{fancy}
\begin{multicols}{2}
\section*{Saturday – Longing for More}
For hundreds of years the Scriptures had been translated into the Greek language, then widely spoken throughout the Roman Empire. The Jews were scattered everywhere, and their expectation of the Messiah’s coming was to some extent shared by the Gentiles. Among those whom the Jews styled heathen were men who had a better understanding of the Scripture prophecies concerning the Messiah than had the teachers in Israel. There were some who hoped for His coming as a deliverer from sin. Philosophers endeavored to study into the mystery of the Hebrew economy. But the bigotry of the Jews hindered the spread of the light. Intent on maintaining the separation between themselves and other nations, they were unwilling to impart the knowledge they still possessed concerning the symbolic service. The true Interpreter must come. The One whom all these types prefigured must explain their significance.
Through nature, through types and symbols, through patriarchs and prophets, God had spoken to the world. Lessons must be given to humanity in the language of humanity. The Messenger of the covenant must speak. His voice must be heard in His own temple. Christ must come to utter words which should be clearly and definitely understood. He, the author of truth, must separate truth from the chaff of man’s utterance, which had made it of no effect. The principles of God’s government and the plan of redemption must be clearly defined. The lessons of the Old Testament must be fully set before men.—The Desire of Ages, pp. 33, 34.
We should seek to follow more closely the example of Christ, the great Shepherd, as He worked with His little company of disciples, studying with them and with the people the Old Testament Scriptures. His active ministry consisted not merely in sermonizing but in educating the people. As He passed through villages, He came in personal contact with the people in their homes, teaching, and ministering to their necessities. As the crowds that followed Him increased, when He came to a favorable place, He would speak to them, simplifying His discourses by the use of parables and symbols.—Evangelism, p. 203.
Christ’s manner of teaching was beautiful and attractive, and it was ever characterized by simplicity. He unfolded the mysteries of the kingdom of heaven through the use of figures and symbols with which His hearers were familiar; and the common people heard Him gladly, for they could comprehend His words. There were no high-sounding words used, to understand which it was necessary to consult a dictionary.—Counsels to Parents, Teachers, and Students, p. 240.
The Jewish economy, bearing the signature of Heaven, had been instituted by Christ Himself. In types and symbols the great truths of redemption were veiled. Yet when Christ came, the Jews did not recognize Him to whom all these symbols pointed. They had the word of God in their hands; but the traditions which had been handed down from generation to generation, and the human interpretation of the Scriptures, hid from them the truth as it is in Jesus. The spiritual import of the sacred writings was lost. The treasure house of all knowledge was open to them, but they knew it not.—Christ’s Object Lessons, p. 104.
\section*{Sunday – Baptized Into Moses}
The example of ancient Israel is given as a warning to the people of God, that they may avoid unbelief and escape His wrath. If the iniquities of the Hebrews had been omitted from the Sacred Record, and only their virtues recounted, their history would fail to teach us the lesson that it does. …
The principles of justice required a faithful narration of facts for the benefit of all who should ever read the Sacred Record. Here we discern the evidences of divine wisdom. We are required to obey the law of God, and are not only instructed as to the penalty of disobedience, but we have narrated for our benefit and warning the history of Adam and Eve in Paradise, and the sad results of their disobedience of God’s commands. … Their example is given us as a warning against disobedience, that we may be sure that the wages of sin is death, that God’s retributive justice never fails, and that He exacts from His creatures a strict regard for His commandments. …
There before us lie the lives of the believers, with all their faults and follies, which are intended as a lesson to all the generations following them. If they had been without foible they would have been more than human, and our sinful natures would despair of ever reaching such a point of excellence. But seeing where they struggled and fell, where they took heart again and conquered through the grace of God, we are encouraged, and led to press over the obstacles that degenerate nature places in our way.—Testimonies for the Church, vol. 4, pp. 11, 12.
The Old Testament is the gospel in figures and symbols. The New Testament is the substance. One is as essential as the other. The Old Testament presents lessons from the lips of Christ, and these lessons have not lost their force in any particular.—Selected Messages, book 2, p. 104.
God commanded Moses for Israel, “Let them make Me a sanctuary; that I may dwell among them” (Exodus 25:8), and He abode in the sanctuary, in the midst of His people. Through all their weary wandering in the desert, the symbol of His presence was with them. So Christ set up His tabernacle in the midst of our human encampment. He pitched His tent by the side of the tents of men, that He might dwell among us, and make us familiar with His divine character and life. “The Word became flesh, and tabernacled among us (and we beheld His glory, glory as of the Only Begotten from the Father), full of grace and truth.” John 1:14, R. V., margin.—The Desire of Ages, p. 23.
\section*{Monday – Ritual and Sacrifices}
Nearly two thousand years ago, a voice of mysterious import was heard in heaven, from the throne of God, “Lo, I come.” … Christ was about to visit our world, and to become incarnate. He says, “A body hast Thou prepared Me.” Had He appeared with the glory that was His with the Father before the world was, we could not have endured the light of His presence. That we might behold it and not be destroyed, the manifestation of His glory was shrouded. His divinity was veiled with humanity,—the invisible glory in the visible human form.
This great purpose had been shadowed forth in types and symbols. The burning bush, in which Christ appeared to Moses, revealed God. The symbol chosen for the representation of the Deity was a lowly shrub, that seemingly had no attractions. This enshrined the Infinite. The all-merciful God shrouded His glory in a most humble type, that Moses could look upon it and live. So in the pillar of cloud by day and the pillar of fire by night, God communicated with Israel, revealing to men His will, and imparting to them His grace. God’s glory was subdued, and His majesty veiled, that the weak vision of finite men might behold it. … His glory was veiled, His greatness and majesty were hidden, that He might draw near to sorrowful, tempted men.—The Desire of Ages, p. 23.
Every morning and evening a lamb of a year old was burned upon the altar, with its appropriate meat offering, thus symbolizing the daily consecration of the nation to Jehovah, and their constant dependence upon the atoning blood of Christ. … The priests were to examine all animals brought as a sacrifice, and were to reject every one in which a defect was discovered. Only an offering “without blemish” could be a symbol of His perfect purity who was to offer Himself as “a lamb without blemish and without spot.” 1 Peter 1:19.
The apostle Paul points to these sacrifices as an illustration of what the followers of Christ are to become. He says, “I beseech you therefore, brethren, by the mercies of God, that ye present your bodies a living sacrifice, holy, acceptable unto God, which is your reasonable service.” Romans 12:1.—Patriarchs and Prophets, p. 352.
Christ was the Lamb slain from the foundation of the world. To many it has been a mystery why so many sacrificial offerings were required in the old dispensation, why so many bleeding victims were led to the altar. But the great truth that was to be kept before men, and imprinted upon mind and heart, was this, “Without shedding of blood is no remission.” In every bleeding sacrifice was typified “the Lamb of God, which taketh away the sin of the world.”
Christ Himself was the originator of the Jewish system of worship, in which, by types and symbols, were shadowed forth spiritual and heavenly things. Many forgot the true significance of these offerings; and the great truth that through Christ alone there is forgiveness of sin, was lost to them.—Ellen G. White Comments, in The SDA Bible Commentary, vol. 7, pp. 932, 933.
\section*{Tuesday – The “Example” of Rest}
There remaineth therefore a rest to the people of God. For he that is entered into his rest, he also hath ceased from his own works, as God [did] from his. Let us labour therefore to enter into that rest, lest any man fall after the same example of unbelief. [Hebrews 4:9–11].
The rest here spoken of is the rest of grace, obtained by following the prescription, Labor diligently. Those who learn of Jesus His meekness and lowliness find rest in the experience of practicing His lessons. It is not in indolence, in selfish ease and pleasure-seeking, that rest is obtained. Those who are unwilling to give the Lord faithful, earnest, loving service will not find spiritual rest in this life or in the life to come. Only from earnest labor comes peace and joy in the Holy Spirit—happiness on earth and glory hereafter.—Ellen G. White Comments, in The SDA Bible Commentary, vol. 7, p. 928.
Rest is found when all self-justification, all reasoning from a selfish standpoint, is put away. Entire self-surrender, an acceptance of His ways, is the secret of perfect rest in His love. Do just what He has told you to do, and be assured that God will do all that He has said He would do. Have you come to Him, renouncing all your makeshifts, all your unbelief, all your self-righteousness? Come just as you are, weak, helpless, and ready to die.
What is the “rest” promised?—It is the consciousness that God is true, that He never disappoints the one who comes to Him. His pardon is full and free, and His acceptance means rest to the soul, rest in His love.—Our High Calling, p. 97.
We shall be saved eternally when we enter in through the gates into the city. Then we may rejoice that we are saved, eternally saved. But until then we need to heed the injunction of the apostle, and to “fear, lest, a promise being left us of entering into his rest, any of us should seem to come short of it” (Hebrews 4:1). Having a knowledge of Canaan, singing the songs of Canaan, rejoicing in the prospect of entering into Canaan, did not bring the children of Israel into the vineyards and olive groves of the Promised Land. They could make it theirs in truth only by occupation, by complying with the conditions, by exercising living faith in God, by appropriating His promises to themselves.
Christ is the author and finisher of our faith, and when we yield to His hand we shall steadily grow in grace and in the knowledge of our Lord and Saviour. We shall make progress until we reach the full stature of men and women in Christ. Faith works by love, and purifies the soul, expelling the love of sin that leads to rebellion against, and transgression of, the law of God.—That I May Know Him, p. 162.
\section*{Wednesday – “Harden Not Your Hearts”}
God requires prompt and unquestioning obedience of His law; but men are asleep or paralyzed by the deceptions of Satan, who suggests excuses and subterfuges, and conquers their scruples, saying as he said to Eve in the garden: “Ye shall not surely die.” Disobedience not only hardens the heart and conscience of the guilty one, but it tends to corrupt the faith of others. That which looked very wrong to them at first, gradually loses this appearance by being constantly before them, till finally they question whether it is really sin and unconsciously fall into the same error. …
Many are the hindrances that lie in the path of those who would walk in obedience to the commandments of God. There are strong and subtle influences that bind them to the ways of the world, but the power of the Lord can break these chains. He will remove every obstacle from before the feet of His faithful ones or give them strength and courage to conquer every difficulty, if they earnestly beseech His help. All hindrances will vanish before an earnest desire and persistent effort to do the will of God at any cost to self, even if life itself is sacrificed. Light from heaven will illuminate the darkness of those, who, in trial and perplexity, go forward, looking unto Jesus as the Author and Finisher of their faith.—Testimonies for the Church, vol. 4, pp. 146, 147.
The coldness of ice, the hardness of iron, the impenetrable, unimpressible nature of rock—all these find a counterpart in the character of many a professed Christian. It was thus that the Lord hardened the heart of Pharaoh. God spoke to the Egyptian king by the mouth of Moses, giving him the most striking evidences of divine power; but the monarch stubbornly refused the light which would have brought him to repentance. God did not send a supernatural power to harden the heart of the rebellious king, but as Pharaoh resisted the truth, the Holy Spirit was withdrawn, and he was left to the darkness and unbelief which he had chosen. By persistent rejection of the Spirit’s influence, men cut themselves off from God. He has in reserve no more potent agency to enlighten their minds. No revelation of His will can reach them in their unbelief.—Our High Calling, p. 160.
Christ says: “I have chosen you, and ordained you, that ye should go and bring forth fruit, and that your fruit should remain” (John 15:16). As Christ’s ambassador, I would entreat of all who read these lines to take heed while it is called today. “If ye will hear his voice, harden not your hearts” (Hebrews 3:15; 4:7). Without waiting a moment, inquire, What am I to Christ? and what is Christ to me? What is my work? What is the character of the fruit I bear?—This Day With God, p. 51.
\section*{Thursday – Conquering a Heavenly City}
No distinction on account of nationality, race, or caste, is recognized by God. He is the Maker of all mankind. All men are of one family by creation, and all are one through redemption. Christ came to demolish every wall of partition, to throw open every compartment of the temple, that every soul may have free access to God. His love is so broad, so deep, so full, that it penetrates everywhere. It lifts out of Satan’s circle the poor souls who have been deluded by his deceptions. It places them within reach of the throne of God, the throne encircled by the rainbow of promise.
In Christ there is neither Jew nor Greek, bond nor free. All are brought nigh by His precious blood. (Galatians 3:28; Ephesians 2:13).
Whatever the difference in religious belief, a call from suffering humanity must be heard and answered. Where bitterness of feeling exists because of difference in religion, much good may be done by personal service. Loving ministry will break down prejudice, and win souls to God.—Christ’s Object Lessons, p. 386.
It is impossible for us, of ourselves, to escape from the pit of sin in which we are sunken. Our hearts are evil, and we cannot change them. “Who can bring a clean thing out of an unclean? not one.” “The carnal mind is enmity against God: for it is not subject to the law of God, neither indeed can be.” Job 14:4; Romans 8:7. Education, culture, the exercise of the will, human effort, all have their proper sphere, but here they are powerless. They may produce an outward correctness of behavior, but they cannot change the heart; they cannot purify the springs of life. There must be a power working from within, a new life from above, before men can be changed from sin to holiness. That power is Christ. His grace alone can quicken the lifeless faculties of the soul, and attract it to God, to holiness.—Steps to Christ, p. 18.
Many make a serious mistake in their religious life by keeping the attention fixed upon their feelings and thus judging of their advancement or decline. Feelings are not a safe criterion. We are not to look within for evidence of our acceptance with God. We shall find there nothing but that which will discourage us. Our only hope is in “looking unto Jesus the Author and Finisher of our faith.” There is everything in Him to inspire with hope, with faith, and with courage. He is our righteousness, our consolation and rejoicing.
Those who look within for comfort will become weary and disappointed. A sense of our weakness and unworthiness should lead us with humility of heart to plead the atoning sacrifice of Christ. As we rely upon His merits we shall find rest and peace and joy. He saves to the uttermost all who come unto God by Him.—Testimonies for the Church, vol. 5, pp. 199, 200.
\section*{Friday – Further Thought}
\setlength{\parindent}{0pt}The Upward Look, “Mix Faith With Hearing,” p. 75;
Spiritual Gifts, “Facts of Faith,” vol. 3, pp. 295, 296.
\end{multicols}
\end{document}
| {
"alphanum_fraction": 0.7869557833,
"avg_line_length": 162.0458715596,
"ext": "tex",
"hexsha": "56b33fee8b1ff3222cd69acab7962ed0266d4d7d",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "4eed4bd2ebd0fd5b33764170427c4f24a2f8f7c9",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ch101112/egw_comments_scraper",
"max_forks_repo_path": "output/egw_en_11.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "4eed4bd2ebd0fd5b33764170427c4f24a2f8f7c9",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ch101112/egw_comments_scraper",
"max_issues_repo_path": "output/egw_en_11.tex",
"max_line_length": 898,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "4eed4bd2ebd0fd5b33764170427c4f24a2f8f7c9",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ch101112/egw_comments_scraper",
"max_stars_repo_path": "output/egw_en_11.tex",
"max_stars_repo_stars_event_max_datetime": "2021-09-06T20:08:34.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-07-11T19:01:26.000Z",
"num_tokens": 4013,
"size": 17663
} |
\subsection{Useful Macros}
\label{macros}
\subsubsection{Macros}
Macros are simple executable files containing standard linux commands.
A number of them are supplied with DL\_POLY and are found in the
{\em execute} sub-directory\index{sub-directory}. The available macros are as follows.
{\sl
\begin{itemize}
\item cleanup
\item copy
\item gopoly
\item gui
\item select
\item store
\item supa
\end{itemize}
}
The function of each of these is described below. It is worth noting
that most of these functions can be performed by the \D{} Java GUI
\cite{smith-gui}. (It may be necessary to set {\em execute} permission
on the macro using the linux command:
\noindent chmod +x {\em macro}
\noindent where {\em macro} is one of the above names.)
\subsubsection{{\sl cleanup}}
{\sl cleanup} removes several standard data files from the {\em
execute} sub-directory\index{sub-directory}. It contains the linux commands:
\begin{verbatim}
#!/bin/tcsh
#
# DL_POLY utility to clean up after a program run
#
if (-e CFGMIN) rm CFGMIN
if (-e OUTPUT) rm OUTPUT
if (-e RDFDAT) rm RDFDAT
if (-e REVCON) rm REVCON
if (-e REVIVE) rm REVIVE
if (-e REVOLD) rm REVOLD
if (-e STATIS) rm STATIS
if (-e ZDNDAT) rm ZDNDAT
\end{verbatim}
\noindent and removes the files (if present) CFGMIN, OUTPUT, REVCON,
REVOLD, STATIS, REVIVE, RDFDAT and ZDNDAT. (Useful data should
be stored elsewhere beforehand!)
\subsubsection{{\sl copy}}
{\sl copy} invokes the linux commands:
\begin{verbatim}
#!/bin/tcsh
#
# utility to set up data for DL_POLY continuation run
#
mv CONFIG CONFIG.OLD
mv REVCON CONFIG
mv REVIVE REVOLD
\end{verbatim}
\noindent which collectively prepare the DL\_POLY files in the {\em execute}
sub-directory\index{sub-directory} for the continuation of a simulation. It is
always a good idea to store these files elsewhere in addition to using this
macro.
\subsubsection{{\sl gopoly}}
{\sl gopoly} is a simple script to submit a DL\_POLY job to a standard linux parallel machine.
\begin{verbatim}
mpirun -np $1 DLPOLY.X
\end{verbatim}
\noindent Normally the job is submitted by the linux command:\\~\\
{\sl gopoly 8}\\~\\
\noindent where (in this case) {\sl 8} specifies the use of 8 processors.
If the {\em serial} version of \DD{} is being used it is of course
acceptable to simply type:
\begin{verbatim}
DLPOLY.X &
\end{verbatim}
\subsubsection{\sl gui}
{\sl gui} is a macro that starts up the \D{} Java GUI. It invokes the
following linux commands:
\begin{verbatim}
java -jar ../java/GUI.jar
\end{verbatim}
In other words the macro invokes the Java Virtual Machine which
executes the instructions in the Java archive file GUI.jar, which is
stored in the {\em java} subdirectory of \D{}. (Note: Java 1.3.0 or a
higher version is required to run the GUI.)
\subsubsection{{\sl select}}
{\sl select} is a macro enabling easy selection of one of the test
cases. It invokes the linux commands:
\begin{verbatim}
#!/bin/tcsh
#
# DL_POLY utility to gather test data files for program run
#
cp ../data/TEST$1/$2/CONTROL CONTROL
cp ../data/TEST$1/$2/FIELD FIELD
cp ../data/TEST$1/$2/CONFIG CONFIG
if (-e ../data/TEST$1/$2/TABLE) then
cp ../data/TEST$1/$2/TABLE TABLE
else if (-e ../data/TEST$1/$2/TABEAM) then
cp ../data/TEST$1/$2/TABEAM TABEAM
endif
\end{verbatim}
\noindent {\sl select} requires two arguments to be specified:\\~\\
{\sl select n a}\\~\\
\noindent where {\sl n} is the (integer) test case number, which
ranges from 1 to 20, and {\sl a} is the character string LF, VV, RB or
CB according to which algorithm is required: leapfrog (LF), velocity
Verlet (VV), rigid body minimisation (RB) or constraint bond
minimisation (CB).
This macro sets up the required input files in the {\em execute}
sub-directory\index{sub-directory} to run the {\sl n}-th test case.
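For example, typing\\~\\
{\sl select 3 VV}\\~\\
\noindent copies the CONTROL, FIELD and CONFIG files (plus the TABLE or
TABEAM file, if one is present) for test case 3 with the velocity
Verlet algorithm into the {\em execute} sub-directory\index{sub-directory}.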
\subsubsection{{\sl store}}
The {\sl store} macro provides a convenient way of moving data back
from the {\em execute} sub-directory\index{sub-directory} to the {\em
data} sub-directory\index{sub-directory}. It invokes the linux
commands:
\begin{verbatim}
#!/bin/tcsh
#
# DL_POLY utility to archive I/O files to the data directory
#
if !(-e ../data/TEST$1) then
mkdir ../data/TEST$1
endif
if !(-e ../data/TEST$1/$2) then
mkdir ../data/TEST$1/$2
endif
mv CONTROL ../data/TEST$1/$2/CONTROL
mv FIELD ../data/TEST$1/$2/FIELD
mv CONFIG ../data/TEST$1/$2/CONFIG
mv OUTPUT ../data/TEST$1/$2/OUTPUT
mv REVIVE ../data/TEST$1/$2/REVIVE
mv REVCON ../data/TEST$1/$2/REVCON
if (-e TABLE) then
mv TABLE ../data/TEST$1/$2/TABLE
endif
if (-e TABEAM) then
mv TABEAM ../data/TEST$1/$2/TABEAM
endif
if (-e STATIS) then
mv STATIS ../data/TEST$1/$2/STATIS
endif
if (-e RDFDAT) then
mv RDFDAT ../data/TEST$1/$2/RDFDAT
endif
if (-e ZDNDAT) then
mv ZDNDAT ../data/TEST$1/$2/ZDNDAT
endif
if (-e CFGMIN) then
mv CFGMIN ../data/TEST$1/$2/CFGMIN
endif
\end{verbatim}
\noindent which first creates a new DL\_POLY {\em data/TEST..}
sub-directory\index{sub-directory} if necessary and then moves the
standard DL\_POLY output data files into it.
{\sl store} requires two arguments:\\~\\
{\sl store n a}\\~\\
\noindent where {\sl n} is a unique string or number to label the
output data in the {\em data/TESTn} sub-directory and {\sl a} is the
character string LF, VV, RB or CB according to which algorithm has
been performed: leapfrog (LF), velocity Verlet (VV), rigid body
minimisation (RB) or constraint bond minimisation (CB).
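For example, typing\\~\\
{\sl store 3 VV}\\~\\
\noindent moves the output files of a velocity Verlet run into the {\em
data/TEST3/VV} sub-directory\index{sub-directory}, creating it first if
it does not already exist.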
\subsubsection{{\sl supa}}
The {\sl supa} macro provides a convenient way of running the DL\_POLY test
cases in batch mode. It is currently structured to submit batch jobs to the
Daresbury Xeon cluster, but can easily be adapted for other machines where
batch queuing is possible. The key statement in this context is the `qsub'
command, which submits the {\sl gopoly} script described above. This statement
may be replaced by the equivalent batch queuing command for your machine. The
text of {\sl supa} is given below.
\begin{verbatim}
#!/bin/tcsh
#
# DL_POLY script to run multiple test cases
# note use of qsub in job submission - may
# need replacing
#
set n=$1
set m=$2
set TYPE="LF VV CB RB"
while ($n <= $m)
if !(-e TEST$n) mkdir TEST$n
cd TEST$n
echo TEST$n
foreach typ ($TYPE)
if (-e ../../data/TEST$n/$typ ) then
if !(-e $typ) mkdir $typ
cd $typ
cp ../../../data/TEST$n/$typ/CONTROL .
cp ../../../data/TEST$n/$typ/CONFIG .
cp ../../../data/TEST$n/$typ/FIELD .
if (-e ../../../data/TEST$n/$typ/TABLE) \
cp ../../../data/TEST$n/$typ/TABLE .
if (-e ../../../data/TEST$n/$typ/TABEAM) \
cp ../../../data/TEST$n/$typ/TABEAM .
qsub ../../gopoly
cd ../
endif
end
cd ../
set n=`expr $n + 1`
end
\end{verbatim}
\noindent This macro creates working {\em TEST} directories in
the {\em execute} sub-directory\index{sub-directory}; one for each
test case invoked. Appropriate sub-directories of these are created for
leapfrog (LF), velocity Verlet (VV), rigid body minimisation (RB) and
constraint bond minimisation (CB). Note that {\sl supa} must be run
from the {\em execute} sub-directory.
{\sl supa} requires two arguments:\\~\\
{\sl supa n m}\\~\\
\noindent where {\sl n} and {\sl m} are integers defining the first
and last test case to be run.
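As an illustration only (assuming a cluster running the SLURM scheduler,
which is not part of the DL\_POLY distribution and whose options must be
checked against local policy), the submission line inside {\sl supa}
might be exchanged as follows:
\begin{verbatim}
# original submission line in supa
qsub ../../gopoly
# possible replacement on a SLURM cluster (illustrative)
sbatch ../../gopoly
\end{verbatim}
\noindent Any resource requests (processor counts, wall time, queue
names) must be added according to the conventions of the local batch
system.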
\clearpage
| {
"alphanum_fraction": 0.708522649,
"avg_line_length": 29.5243902439,
"ext": "tex",
"hexsha": "c5c4d60e6d63acd06cf713a2049b334b321dc1fd",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f2712ca1cdddd154f621f9f5a3c2abac94e41e58",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "zzalscv2/DL_POLY_Classic",
"max_forks_repo_path": "manual/macros.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f2712ca1cdddd154f621f9f5a3c2abac94e41e58",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "zzalscv2/DL_POLY_Classic",
"max_issues_repo_path": "manual/macros.tex",
"max_line_length": 94,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f2712ca1cdddd154f621f9f5a3c2abac94e41e58",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "zzalscv2/DL_POLY_Classic",
"max_stars_repo_path": "manual/macros.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2167,
"size": 7263
} |
\documentclass{sig-alternate-05-2015}
\usepackage{xcolor}
\usepackage{pifont}
\usepackage{paralist} % inparaenum support
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{microtype}
\newcommand{\quadrat}{\ding{110}}%
\begin{document}
\sloppy
% No indent of paragraph
\parindent 0pt
% Copyright
\CopyrightYear{2016}
\setcopyright{acmlicensed}
\conferenceinfo{SIGSPATIAL'16,}{October 31-November 03, 2016, Burlingame, CA,
USA}
\isbn{978-1-4503-4589-7/16/10}\acmPrice{\$15.00}
\doi{http://dx.doi.org/10.1145/2996913.2996931}
%
% --- Author Metadata here ---
%\conferenceinfo{WOODSTOCK}{'97 El Paso, Texas USA}
%\CopyrightYear{2007} % Allows default copyright year (20XX) to be over-ridden
%- IF NEED BE.
%\crdata{0-12345-67-8/90/01} % Allows default copyright data
%(0-89791-88-6/97/05) to be over-ridden - IF NEED BE.
% --- End of Author Metadata ---
\title{BigGIS: A Continuous Refinement Approach to Master Heterogeneity and
Uncertainty in Spatio-Temporal Big Data (Vision Paper)}
%\titlenote{(Produces the permission block, and
%copyright information). For use with
%SIG-ALTERNATE.CLS. Supported by ACM.}}
%\subtitle{[Extended Abstract]
%\titlenote{A full version of this paper is available as
%\textit{Author's Guide to Preparing ACM SIG Proceedings Using
%\LaTeX$2_\epsilon$\ and BibTeX} at
%\texttt{www.acm.org/eaddress.htm}}}
%
% You need the command \numberofauthors to handle the 'placement
% and alignment' of the authors beneath the title.
%
% For aesthetic reasons, we recommend 'three authors at a time'
% i.e. three 'name/affiliation blocks' be placed beneath the title.
%
% NOTE: You are NOT restricted in how many 'rows' of
% "name/affiliations" may appear. We just ask that you restrict
% the number of 'columns' to three.
%
% Because of the available 'opening page real-estate'
% we ask you to refrain from putting more than six authors
% (two rows with three columns) beneath the article title.
% More than six makes the first-page appear very cluttered indeed.
%
% Use the \alignauthor commands to handle the names
% and affiliations for an 'aesthetic maximum' of six authors.
% Add names, affiliations, addresses for
% the seventh etc. author(s) as the argument for the
% \additionalauthors command.
% These 'additional authors' will be output/set for you
% without further effort on your part as the last section in
% the body of your article BEFORE References or any Appendices.
\numberofauthors{1} % in this sample file, there are a *total*
% of EIGHT authors. SIX appear on the 'first-page' (for formatting
% reasons) and the remaining two appear in the \additionalauthors section.
%
\author{
\alignauthor
Patrick Wiener\textsuperscript{1}, Manuel Stein\textsuperscript{2}, Daniel
Seebacher\textsuperscript{2}, Julian Bruns\textsuperscript{3}, Matthias
Frank\textsuperscript{3}, Viliam Simko\textsuperscript{3},
Stefan Zander\textsuperscript{3}, Jens
Nimis\textsuperscript{1}\\~\\
\affaddr{\textsuperscript{1}University of Applied Sciences Karlsruhe,
Karlsruhe, Germany}\\
\email{\{patrick.wiener, jens.nimis\}@hs-karlsruhe.de}\\
\affaddr{\textsuperscript{2}Data Analysis and Visualization Group, University
of Konstanz, Konstanz, Germany}\\
\email{\{stein, seebacher\}@dbvis.inf.uni-konstanz.de}\\
\affaddr{\textsuperscript{3}FZI Research Center for Information Technology,
Karlsruhe, Germany}\\
\email{\{bruns, frank, simko, zander\}@fzi.de}
}
%\author{
% You can go ahead and credit any number of authors here,
% e.g. one 'row of three' or two rows (consisting of one row of three
% and a second row of one, two or three).
%
% The command \alignauthor (no curly braces needed) should
% precede each author name, affiliation/snail-mail address and
% e-mail address. Additionally, tag each line of
% affiliation/address with \affaddr, and tag the
% e-mail address with \email.
%
% 1st. author
%\titlenote{Maybe put address, e-mail here if allowed}
%\alignauthor
%%Patrick Wiener,\\Prof. Dr. Jens Nimis\\
%Patrick Wiener\\
% \affaddr{University of Applied Sciences Karlsruhe}\\
%% \affaddr{Moltkestr. 30}\\
% \affaddr{Karlsruhe, Germany}\\
% \email{[email protected]}
%% 2nd. author
%\alignauthor
%%Julian Bruns,\\Matthias Frank,\\Dr. Viliam Simko\\
%Julian Bruns\\
% \affaddr{FZI Research Center for Information Technology}\\
%% \affaddr{Haid-und-Neu-Str. 10-14}\\
% \affaddr{Karlsruhe, Germany}\\
% \email{[email protected]}
%% 3rd. author
%\alignauthor
%%Manuel Stein,\\Daniel Seebacher\\
%Daniel Seebacher\\
% \affaddr{University of Konstanz}\\
% \affaddr{Konstanz, Germany}\\
% \email{[email protected]}
%\and % use '\and' if you need 'another row' of author names
%%% 4th. author
%\alignauthor
%Matthias Frank\\
% \affaddr{FZI Research Center for Information Technology}\\
%% \affaddr{Haid-und-Neu-Str. 10-14}\\
% \affaddr{Karlsruhe, Germany}\\
% \email{[email protected]}
%%% 5th. author
%\alignauthor
%Manuel Stein\\
%% \affaddr{Data Analysis and Visualization Group}\\
% \affaddr{University of Konstanz}\\
% \affaddr{Konstanz, Germany}\\
% \email{[email protected]}
%%% 6th. author
%\alignauthor
%Prof. Dr.-Ing. Jens Nimis\\
% \affaddr{University of Applied Sciences Karlsruhe}\\
%% \affaddr{Moltkestr. 30}\\
% \affaddr{Karlsruhe, Germany}\\
% \email{[email protected]}
%}
% There's nothing stopping you putting the seventh, eighth, etc.
% author on the opening page (as the 'third row') but we ask,
% for aesthetic reasons that you place these 'additional authors'
% in the \additional authors block, viz.
%\additionalauthors{Dr. Viliam Simko (FZI Research Center for Information
%Technology, email: {\texttt{[email protected]}})}
%\date{30 July 1999}
% Just remember to make sure that the TOTAL number of authors
% is the number that will appear on the first page PLUS the
% number that will appear in the \additionalauthors section.
\maketitle
\begin{abstract}
Geographic information systems (GIS) are important for decision support
based on spatial data. Due to technical and economic progress, an ever
increasing number of data sources is available, leading to rapidly growing,
fast and unreliable amounts of data that can be beneficial
\begin{inparaenum}[(1)]
\item in the approximation of multivariate and causal predictions of future
values as well as
\item in robust and proactive decision-making processes.
\end{inparaenum}
However, today's GIS are not designed for such big data demands and require new
methodologies to effectively model uncertainty and generate meaningful
knowledge. As a consequence, we introduce \textit{BigGIS}, a predictive and
prescriptive spatio-temporal analytics platform, that symbiotically
combines big data analytics, semantic web technologies and visual analytics
methodologies. We present a novel continuous refinement model and show future
challenges as an intermediate result of a collaborative research project into
big data methodologies for spatio-temporal analysis and design for a big data
enabled GIS.
\end{abstract}
%
% The code below should be generated by the tool at
% http://dl.acm.org/ccs.cfm
% Please copy and paste the code instead of the example below.
%
\begin{CCSXML}
<ccs2012>
<concept>
<concept_id>10002951.10003227.10003236.10003237</concept_id>
<concept_desc>Information systems~Geographic information systems</concept_desc>
<concept_significance>500</concept_significance>
</concept>
<concept>
<concept_id>10002951.10003227.10010926</concept_id>
<concept_desc>Information systems~Computing platforms</concept_desc>
<concept_significance>300</concept_significance>
</concept>
<concept>
<concept_id>10003120.10003145.10003147.10010365</concept_id>
<concept_desc>Human-centered computing~Visual analytics</concept_desc>
<concept_significance>500</concept_significance>
</concept>
<concept>
<concept_id>10011007.10011006.10011039.10011311</concept_id>
<concept_desc>Software and its engineering~Semantics</concept_desc>
<concept_significance>100</concept_significance>
</concept>
</ccs2012>
\end{CCSXML}
\ccsdesc[500]{Information systems~Geographic information systems}
%\ccsdesc[500]{Information systems~Computing platforms}
\ccsdesc[500]{Human-centered computing~Visual analytics}
\ccsdesc[500]{Software and its engineering~Semantics}
%
% End generated code
%
%
% Use this command to print the description
%
\printccsdesc
% We no longer use \terms command
%\terms{Theory}
%\category{C.1.3}{Other Architecture Styles}{Data-flow architectures}
%\category{H.1.2}{User/Machine Systems}{Human information processing}
%\category{H.2.8}{Database Applications}{Data mining, Spatial databases and
%GIS}
\keywords{knowledge generation, big data analytics, data architecture}
\section{Introduction}
\label{sec:intro}
GIS have long been used to support the decision-making
process~\cite{Crossland1995} in many domains like civil planning, environment
and nature protection or emergency management. In this sense, geospatial data have
always been big data. Petabytes of remotely sensed archival geodata
(\textit{volume}) and a rapidly increasing amount of real-time sensor data
streams (\textit{velocity}) accelerate the need for big data analytics in order
to effectively model and efficiently process complex spatio-temporal problems.
In the past, limited access to computing power was a
bottleneck~\cite{OGC2013}. However, in the era of cloud computing,
leveraging cloud-based resources is a widely adopted pattern.
In addition, with the advent of big data analytics, performing massively
parallel analytical tasks on large-scale data at rest or data in motion is as
well becoming a feasible approach shaping the design of today's GIS. Although
scaling out enables GIS to tackle the aforementioned big data induced
requirements, there are still two major open issues. Firstly, dealing with
varying data types across multiple data sources (\textit{variety}) leads to data
and schema heterogeneity, e.g., to describe locations such as addresses,
relative spatial relationships or different coordinate reference
systems~\cite{Frank.2016b}. Secondly, modelling the inherent uncertainties in
data (\textit{veracity}), e.g., real-world noise and erroneous values due to
the nature of the data collecting process. Both are crucial tasks in data
management and analytics that directly affect the information retrieval and
decision-making quality and, moreover, the knowledge generated on the human
side (\textit{value}). Current approaches mainly address batch and stream
analytics in their design, which is oftentimes implemented as a closed unified
analytical system~\cite{Thakur2015}. While the importance of such systems for
efficiently dealing with large amounts of data is obvious, computers lack the
cognition and perception of human analysts needed to establish hidden
connections between data and the problem domain~\cite{SSS+14a}.
In this paper, we present the vision of \textit{BigGIS}, a next generation
predictive and prescriptive GIS, that leverages big data analytics, semantic
web technologies and visual analytics methodologies. This approach
symbiotically combines system-side computation, data storage and semantic web
services capabilities with human-side perceptive skills, cognitive reasoning
and domain knowledge. We introduce a novel \textit{continuous refinement model}
to gradually minimize the real-world noise and dissolve heterogeneity in data
and metadata such that the information gain can be maximized. Our contribution
lies in
\begin{inparaenum}[(1)]
\item an \textit{integrated analytical pipeline} which includes
\item \textit{smart semantic web services},
\item \textit{domain expert knowledge extraction and generation }as
well as
\item \textit{modelling uncertainty} to process
high volume, high velocity and high dimensional spatio-temporal data from
unreliable and heterogeneous sources.
\end{inparaenum}
In Section \ref{sec:related}, we discuss related work. The platform's design is
introduced in Section \ref{sec:biggis} through the continuous refinement model,
while major challenges are presented. Use cases are shown in Section
\ref{sec:use}. Finally, Section \ref{sec:concl} concludes and addresses future
work.
\section{Related Work}
\label{sec:related}
Challenges related to the nature of big data have
led to the evolution of new big data management and analytics architectures
embracing big data-aware GIS~\cite{Peng2014}. Marz proposes the
\textit{lambda architecture}~\cite{Marz2013}, a generic, scalable and
fault-tolerant data processing system design. By decomposing the problem into
three layers, namely batch layer, speed layer, and serving layer, this
architecture hybridly combines batch analytics on historic data and stream
analytics on streaming data to overcome the weaknesses of each. Thakur et al.
introduce \textit{PlanetSense}~\cite{Thakur2015}, a real-time streaming and
spatio-temporal analytics platform for gathering geo-spatial intelligence from
open source data. Based on the lambda architecture, this platform enriches
large volumes of historic data by harvesting real-time data on the fly, e.g.,
social media, or passive and participatory sensors. While this design allows
for ad hoc analysis during batch runs, the processing logic has to be
implemented twice. In a more recent approach, Kreps criticizes the
overall complexity of the lambda architecture and presents the \textit{kappa
architecture}~\cite{Kreps2014}, which simplifies the systems' design
by neglecting the batch layer. To replace batch processing, static data is
quickly fed through the streaming engine. A representative is
\textit{Plasmap}\footnote{\url{https://plasmap.io/}}, a high performance
geo-processing platform that provides a lightweight, interactive query language
for high-performance location discovery based on OpenStreetMap. In contrast to
BigGIS, both PlanetSense and Plasmap do not apply reasoning on semantic
metadata and domain expert knowledge during runtime. However, developments in
the field of \textit{semantic web technologies} show the opportunity of adding
higher semantic levels to existing frameworks in order to improve their
usage in terms of integrating spatio-temporal big data and ease scalability,
allowing for reasoning and comprehensive responses~\cite{Tanasescu2006,
Frank.2016a, Frank.2016b}. Analyses are often
performed in a descriptive, predictive or prescriptive way. While the
descriptive analysis visualizes the status quo, predictive and prescriptive
analysis focuses on future-oriented planning. As a result, the underlying model
and the visualization have to be tightly coupled in order for users to gain
knowledge. Users have the possibility to interactively alter a model's
parameters according to their knowledge, consequently the visualization adjusts
to the model in a feedback-loop. Knowledge generation is one important research
area where \textit{visual analytics} is of great use~\cite{Keim2008, Keim2010},
especially when considering uncertainty of heterogeneous spatio-temporal data
from various data sources \cite{SSK+16a}. J\"ackle et al. present one possible
visualization technique \cite{JSBK15} for data and uncertainties of large
spatial datasets, which is crucial within use cases where both facets are of
importance for decision-making.
%Andrienko et al. state that geovisual analytics need new approaches to
%deal with the complexity of data and address a research agenda for
%working with spatio-temporal data \cite{Andrienko2010}.
% Plasmap differs from BigGIS in that domain expert knowledge is not
%used to train the system.
\section{B\MakeLowercase{ig}GIS Platform}
\label{sec:biggis}
\subsection{Continuous Refinement Model in BigGIS}
\label{sec:crm}
In this section, we briefly describe the continuous refinement model in
BigGIS, which extends the knowledge generation model for visual
analytics \cite{SSS+14a}. This will, on the one hand, make it possible to steadily
improve the analysis results, e.g., by updating deployed machine learning
models, and, on the other hand, to build the user's trust in these results by
creating awareness of underlying uncertainties and data provenance which is key
for providing meaningful predictive and prescriptive decision support in
various fields \cite{SSK+16a}. We consider uncertainty to be reciprocally
related to generating new insights and consequently knowledge. Thus, modelling
uncertainty is a crucial task in BigGIS. From a high-level perspective, our
approach consists of an integrated analytics pipeline which blends big data
analytics and semantic web services on system-side with domain expert
knowledge on human-side, thereby modelling uncertainty to continuously refine
results to generate new knowledge as shown in Figure \ref{fig:biggisworkflow}.
\begin{figure}
\centering
\includegraphics[width=\linewidth]{figures/biggis-workflow_v6}
\caption{Continuous refinement model in BigGIS.}
\label{fig:biggisworkflow}
\end{figure}
\subsubsection{Integrated Analytics Pipeline}
The analytics pipeline is the core of the continuous refinement model. A key
abstraction within this model are specific access points called
\textit{refinement gates} that are expressed by a topic-based publish-subscribe
pattern (see yellow squares in Figure \ref{fig:biggisworkflow}).
Refinement gates allow for smart semantic web services, external domain expert
knowledge and user interaction to enter the pipeline at arbitrary stages during
analyses to continuously improve data management and analyses results, e.g., to
support data preparation, to automatically deploy data transformation
workflows, to provide domain expert knowledge in order to train machine
learning models for pattern detection or to manipulate visualizations.
\subsubsection{Smart Semantic Web Services}
Locating all available data sources that are relevant for meaningful findings
in analytical processes is hard to do when it has to be done manually. Semantic
web technologies help to describe data sources using standard vocabularies.
Furthermore, reasoning on the logical entailments helps in discovering suitable
sources even if they are described differently, providing a two-level support
for users through what we call \textit{Linked APIs} and \textit{Cognitive
Apps}. The former abstracts away the user from manually performing data
integration steps to unify heterogeneous data sources by building on
appropriate ontologies~\cite{Frank.2016b} that support the system (direct
semantic support). The latter is a flexible service that is aware of a
situational context and capable of sharing it with other services (indirect
semantic support).
\subsubsection{Domain Expert Knowledge Extraction and Generation}
The user is another relevant part in the continuous refinement model who is
either provided with additional domain expert knowledge by another person or
she herself is the expert in a specific field of application (direct expert
knowledge). Overall, we see the continuous refinement process as a knowledge
transfer from human to system which is reinforced by smart semantic web
services. Thereby, human knowledge is introduced to the system that can contain
additional domain specific information and constraints. By doing so, big data
analytics can
\begin{inparaenum}[(1)]
\item leverage the perceptive skills and cognitive reasoning of human analysts
to establish hidden connections between data and the problem domain
and
\item continuously refine the analyses quality and results.
\end{inparaenum}
The system intelligently learns from the provided external domain knowledge,
such that it can reuse it for future tasks (indirect expert support). This
leads to an increasing likelihood of relevant findings by a user during the
course of exploration and eventually to the generation of new knowledge.
\subsubsection{Modelling Uncertainty}
Uncertainty is inherent in data as well as in
models~\cite{cressie2015statistics}. While this is often obvious in data such
as volunteered geographic information (VGI) and participatory sensing data,
this holds true for all available data. Models derived from data
are only perfectly applicable for the data upon which they are learned.
Additionally, domain expert knowledge has some inherent uncertainty as well.
Thus, uncertainty constitutes an impediment for refinement. Transparently
showing uncertainty would mitigate problems in the first place and would help
the user to build up trust. To handle these uncertainties, we express them as
\textit{conditional probabilities}. These conditional probabilities allow us to
evaluate and model the uncertainty of each data point as well as forecast an
analytical model. We apply semantic reasoning on the provenance information of
data sources in order to infer a level of uncertainty that can be considered in
the analytical processes. We use \textit{bayesian hierarchical
models}~\cite{cressie2015statistics} to be able to cope with the conditional
probabilities quantified by the semantic reasoning. The idea behind this is
that we can model different parameters by their joint probability distribution.
Each parameter can be modelled by hyperparameters, which are again probability
distributions. The resulting models are probability distributions as well,
which can be used in our continuous refinement model. By doing so, we can
model, examine and present the uncertainty at each stage of the process to
enable the user of BigGIS to make a well-informed decision.
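As a purely illustrative sketch (and not the specific model employed in
BigGIS), a minimal two-level hierarchical model for an observed value $y_i$
at location $i$ could read
\begin{displaymath}
y_i \mid \theta_i \sim \mathcal{N}(\theta_i, \sigma^2), \qquad
\theta_i \mid \mu, \tau \sim \mathcal{N}(\mu, \tau^2),
\end{displaymath}
with prior distributions placed on the hyperparameters $\mu$, $\tau$ and
$\sigma$. Every level of such a model is itself a probability distribution,
so the posterior over $\theta_i$ directly quantifies the uncertainty that is
propagated through the continuous refinement model.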
\subsection{Challenges}
\label{sec:chls}
Data volume and velocity are well-managed requirements through scalable
cloud-based big data architectures~\cite{Marz2013, Kreps2014}. Yet, there are
still additional big data dimensions, namely variety and veracity of
spatio-temporal data, that need to be dealt with in order to generate
meaningful knowledge. Based on this, we identify three major challenges.
\subsubsection{Varying big data related requirements}
The field of application specifies the degrees of big data related
requirements. Thus, efficiently managing the complex backend ecosystem for
varying requirements is a non-trivial task. We approach this challenge by
leveraging Apache Mesos\footnote{\url{http://mesos.apache.org/}} in combination
with container technologies such as Docker
\footnote{\url{https://www.docker.com/}}. In addition, dealing with data and
schema heterogeneity and inherent uncertainty is another relevant field of
research that BigGIS addresses. Preconditions for meaningful findings in GIS
are accurate, consistent and complete data as input for analytical processes.
However, as more spatio-temporal data sources emerge, the quality of the data
varies as well, especially when considering uncertain data such as VGI. We
intend to address this challenge by a smart data integration approach which is
based on semantically described data sources extending existing ontologies and
data transformation services according to the requirements of different
analytical goals~\cite{Frank.2016a, Frank.2016b}.
\subsubsection{Dimensionality reduction}
The continuous refinement model employed in BigGIS aims to provide real-time
data processing functionality to minimize the time to deliver insights to the
user. Spatio-temporal data, e.g., airborne hyperspectral images, are
high-dimensional and models built upon this data have to deal with the curse of
dimensionality due to the given real-time constraint. Also, while the presented
architecture can handle the challenges of big data, it is not always possible
to transfer all the raw data to our pipeline. In the example case of a sensor
on an unmanned aerial vehicle, the transfer rate depends on the available
bandwidth. BigGIS aims to deal with the challenge of dimensionality reduction
for spatio-temporal data, balancing between the robustness of a model and the
adaptability to training and input data.
\subsubsection{Bias-variance trade-off}
The bias-variance trade-off~\cite{Hastie2009} is of particular
interest in BigGIS, as the modelling of uncertainty in the continuous
refinement model is inherently connected to this. Generally, solving this
trade-off optimally is highly depending on the specific use case. Providing the
user with sufficient information to reason and generate knowledge under these
restrictions is one demanding problem. Here, the challenge lies in the speed
of computation, the different level of expertise for each user and the
available bandwidth to transfer information back to the user and the analytics
pipeline.
\section{Use Cases}
\label{sec:use}
%\textcolor{red}{ToDO@all: please review and comment}
BigGIS will support decision-making in multiple use cases that require
processing of large and heterogeneous spatio-temporal data from unreliable
sources. The prototype will be evaluated on three use cases:
\begin{inparaenum}[(1)]
\item smart city and health, i.e., heat stress in urban areas,
\item environmental management, i.e., spread of invasive species,
\item emergency management, i.e., identification and dispersion of hazardous
gas in chemical accidents.
\end{inparaenum}
These scenarios represent diverse categories of application domains that each
address varying big data related requirements.
%In brief, one example is
%the environmental management scenario, where farmers can provide real-time
%data
%(velocity) from differing sources (variety), e.g. private weather stations,
%photos of invasive
%species, or text messages about contaminated areas, though arriving with high
%uncertainty (veracity). The experts domain knowledge helps to train
%classifiers
%in BigGIS on already available labeled datasets (volume) that, in addition to
%further semantically described sources, helps conducting spatio-temporal
%statistics such as hot spot analyses to make better predictions on potentially
%jeopardized areas. Not only are farmers informed about the condition of their
%fields (descriptive) but also about risk potential of contamination
%(predictive), which lastly results in suggestions to perform certain
%counteractive measures (prescriptive).
%%this is just an alternative scenario; speech has to be updated.
In brief, an illustrating scenario in the aforementioned emergency management
use case is supporting rescue forces in assessing and managing large-scale and
complex chemical disasters. Providing an in-depth overview of the current
situation within a small time frame (velocity) is crucial to prevent exposing
the surrounding population to any hazardous substances. Recent developments in
the field of mobile robotics allow using in-situ components such as
autonomously flying unmanned aerial vehicles equipped with hyperspectral
cameras to scan the affected area for hazardous gases producing several
gigabytes of raw data per mission (volume). In addition, differing sources
(variety), e.g., weather stations, VGI or participatory sensing data, can be
integrated in BigGIS, though arriving with high uncertainty (veracity). The
combination of those datasets with various other semantically described data
sources helps in conducting spatio-temporal statistics. Furthermore, the experts'
domain knowledge is used to train classifiers in order to automatically
classify the hazardous content and identify contaminated areas. Conditional
probabilities are computed to forecast the dispersion of the hazardous smoke
and visualized in risk maps to highlight potentially endangered areas. Not only
are the rescue forces informed about the current situation (descriptive), but
also about the risk potential of surrounding areas (predictive), which can be
used to automatically alert further public authorities and organizations that
would be enabled to perform specifically targeted measures (prescriptive).
\section{Conclusions and Future Work}
\label{sec:concl}
Big geodata will continually grow during the next years. The
rapidly increasing distribution and importance of remote sensing data, e.g.,
from unmanned aerial vehicles, and participatory sensing data as well as the
emergence of new data sources lead to more diverse, larger and unreliable data.
In this paper, we proposed BigGIS, a next generation predictive and
prescriptive GIS, that leverages big data analytics, semantic web technologies
and visual analytics methodologies through a novel continuous refinement model.
We showed the key architectural elements to master heterogeneity and
uncertainty in spatio-temporal big data to generate meaningful knowledge and
identified three main challenges. Currently, we are working on an integrated
prototype to support each of the presented use cases.
%\end{document} % This is where a 'short' article might terminate
%ACKNOWLEDGMENTS are optional
\section{Acknowledgements}
\label{sec:ack}
%This work has been developed in the project BigGIS. BigGIS (reference
%number: 01IS14012) is funded by the German ministry of education and research
%(BMBF) within the research programme ICT 2020.
The project BigGIS (reference number: 01IS14012) is funded by the Federal
Ministry of Education and Research (BMBF) within the frame of the research
programme ``Management and Analysis of Big Data'' in ``ICT 2020 --
Research for Innovations''.
%
% The following two commands are all you need in the
% initial runs of your .tex file to
% produce the bibliography for the citations in your paper.
\bibliographystyle{abbrv}
\bibliography{biggis-paper} % sigproc.bib is the name of the
%Bibliography
%this case
% You must have a proper ".bib" file
% and remember to run:
% latex bibtex latex latex
% to resolve all references
%
% ACM needs 'a single self-contained file'!
%
%APPENDICES are optional
%\balancecolumns
%\appendix
%Appendix A
\end{document}
| {
"alphanum_fraction": 0.8016606717,
"avg_line_length": 50.3333333333,
"ext": "tex",
"hexsha": "c648d9dc27671496d336a5c6ba49bce8e88ece59",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "66a0cb721c30a05ab062bf7dfc0384bc2dd0dc1c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "biggis-project/sigspatial-vision-paper",
"max_forks_repo_path": "biggis-paper.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "66a0cb721c30a05ab062bf7dfc0384bc2dd0dc1c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "biggis-project/sigspatial-vision-paper",
"max_issues_repo_path": "biggis-paper.tex",
"max_line_length": 79,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "66a0cb721c30a05ab062bf7dfc0384bc2dd0dc1c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "biggis-project/sigspatial-vision-paper",
"max_stars_repo_path": "biggis-paper.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 6878,
"size": 29747
} |
% !TEX root = ./busty_transcription.tex
\section{Mean Gene Expression}\label{section_02_means}
As noted in the previous section, there are two broad classes of models in play
for computing the input-output functions of regulatory architectures as shown in
Figure~\ref{fig1:means_cartoons}. In both classes of model, the promoter is
imagined to exist in a discrete set of states of occupancy, with each such state
of occupancy accorded its own rate of transcription --including no
transcription for many of these states. This discretization of a potentially
continuous number of promoter states (due to effects such as supercoiling of
DNA~\cite{Chong2014, Sevier2016} or DNA looping \cite{Boedicker2013a}) is
analogous to how the Monod-Wyman-Changeux model of allostery coarse-grains
continuous molecule conformations into a finite number of
states~\cite{Martins2011}. The models are probabilistic with each state assigned
some probability and the overall rate of transcription given by
\begin{equation}
\mbox{average rate of transcription} = \sum_i r_i p_i,
\label{eq:transcrip_prop_pbound}
\end{equation}
where $i$ labels the distinct states, $p_i$ is the probability of the
$i^{\text{th}}$ state, and $r_i$ is the rate of transcription of that state.
Ultimately, the different models differ along several key aspects: what states
to consider and how to compute the probabilities of those states.
The first class of models that are the subject of the present section focus on
predicting the mean level of gene expression. These models, sometimes known as
thermodynamic models, invoke the tools of equilibrium statistical mechanics to
compute the probabilities of the promoter microstates~\cite{Ackers1982,
Shea1985, Buchler2003, Vilar2003a, Vilar2003b, Bintu2005a, Bintu2005c,
Gertz2009, Sherman2012, Saiz2013}. As seen in
Figure~\ref{fig1:means_cartoons}(B), even within the class of thermodynamic
models, we can make different commitments about the underlying microscopic
states of the promoter. Model 1 considers only two states: a state in which a
repressor (with copy number $R$) binds to an operator and a transcriptionally
active state. The free energy difference between the repressor binding the
operator, i.e. a specific binding site, and one of the $N_{NS}$ non-specific
sites is given by $\Delta\varepsilon_R$ (given in $k_BT$ units with $\beta\equiv
(k_BT)^{-1}$). Model 2 expands this model to include an empty promoter where no
transcription occurs, as well as a state in which one of the $P$ RNAPs binds to
the promoter with binding energy $\Delta\varepsilon_P$. Indeed, the list of
options considered here does not at all exhaust the suite of different
microscopic states we can assign to the promoter. The essence of thermodynamic
models is to assign a discrete set of states and to use equilibrium statistical
mechanics to compute the probabilities of occupancy of those states.
The second class of models that allow us to access the mean gene expression use
chemical master equations to compute the probabilities of the different
microscopic states ~\cite{Ko1991, Peccoud1995, Record1996, Kepler2001,
Sanchez2008, Shahrezaei2008, Sanchez2011, Michel2010}. The main differences
between both modeling approaches can be summarized as: 1) Although for both
classes of models the steps involving transcriptional events are assumed to be
strictly irreversible, thermodynamic models force the regulation, i.e., the
control over the expression exerted by the repressor, to be in equilibrium. This
does not need to be the case for kinetic models. 2) Thermodynamic models ignore
the mRNA count from the state of the Markov process, while kinetic models keep
track of both the promoter state and the mRNA count. 3) Finally, thermodynamic
and kinetic models coarse-grain to different degrees the molecular mechanisms
through which RNAP enters the transcriptional event. As seen in
Figure~\ref{fig1:means_cartoons}(C), we consider a host of different kinetic
models, each of which will have its own result for both the mean (this section)
and noise (next section) in gene expression.
\subsection{Fold-changes are indistinguishable across models}
As a first stop on our search for the ``right'' model of simple repression, let
us consider what we can learn from theory and experimental measurements on the
average level of gene expression in a population of cells. One experimental
strategy that has been particularly useful (if incomplete since it misses out on
gene expression dynamics) is to measure the fold-change in mean
expression~\cite{Garcia2011}. The fold-change $FC$ is defined as
\begin{equation}
FC(R)
= \frac{\langle \text{gene expression with }R > 0 \rangle}
{\langle \text{gene expression with }R = 0 \rangle}
= \frac{\langle m (R) \rangle}{\langle m (0) \rangle}
= \frac{\langle p (R) \rangle}{\langle p (0) \rangle},
\label{eq:fc_def}
\end{equation}
where angle brackets $\left\langle \cdot \right\rangle$ denote the average over
a population of cells and mean mRNA $\langle m\rangle$ and mean protein $\langle
p\rangle$ are viewed as a function of repressor copy number $R$. What this means
is that the fold-change in gene expression is a relative measurement of the
effect of the transcriptional repressor ($R > 0$) on the gene expression level
compared to an unregulated promoter ($R = 0$). The third equality in
Eq.~\ref{eq:fc_def} follows from assuming that the translation efficiency, i.e.,
the number of proteins translated per mRNA, is the same in both conditions. In
other words, we assume that mean protein level is proportional to mean mRNA
level, and that the proportionality constant is the same in both conditions and
therefore cancels out in the ratio. This is reasonable since the cells in the
two conditions are identical except for the presence of the transcription
factor, and the model assumes that the transcription factor has no direct effect
on translation.
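To spell this assumption out, let $\eta$ denote the mean number of proteins
produced per mRNA (a bookkeeping constant introduced here only for
illustration). If $\langle p(R) \rangle = \eta \langle m(R) \rangle$ with the
same $\eta$ in the presence and absence of repressor, then
\begin{equation}
\frac{\langle p (R) \rangle}{\langle p (0) \rangle}
= \frac{\eta \langle m (R) \rangle}{\eta \langle m (0) \rangle}
= \frac{\langle m (R) \rangle}{\langle m (0) \rangle},
\end{equation}
so the proportionality constant cancels and the mRNA- and protein-based
definitions of the fold-change coincide.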
Fold-change has proven a very convenient observable in past
work~\cite{Garcia2011a, Brewster2014, Razo-Mejia2018, Chure2019}. Part of its
utility in dissecting transcriptional regulation is its ratiometric nature,
which removes many secondary effects that are present when making an absolute
gene expression measurement. Also, by measuring otherwise identical cells with
and without a transcription factor present, any biological noise common to both
conditions can be made to cancel out. Figure~\ref{fig1:means_cartoons}(B) and
(C) depicts a smorgasbord of mathematicized cartoons for simple repression using
both thermodynamic and kinetic models, respectively, that have appeared in
previous literature. For each cartoon, we calculate the fold-change in mean gene
expression as predicted by that model, deferring most algebraic details to
Appendix~\ref{sec:non_bursty}. What we will find is that for all cartoons the
fold-change can be written as a Fermi function of the form
\begin{equation}
FC(R) = \left( 1 + \exp(-\Delta F_R(R) + \log(\rho)) \right)^{-1},
\label{eq:deltaFR_eq_noneq_equiv}
\end{equation}
where the effective free energy contains two terms: the parameters $\Delta F_R$,
an effective free energy parametrizing the repressor-DNA interaction, and
$\rho$, a term derived from the level of coarse-graining used to model all
repressor-free states. In other words, the effective free energy of the Fermi
function can be written as the additive effect of the regulation given by the
repressor via $\Delta F_R$, and the kinetic scheme used to describe the steps
that lead to a transcriptional event via $\log(\rho)$ (See
Figure~\ref{fig1:means_cartoons}(D), left panel). This implies all models
collapse to a single master curve as shown in
Figure~\ref{fig1:means_cartoons}(D). We will offer some intuition for why this
master curve exists and discuss why at the level of the mean expression, we are
unable to discriminate ``right'' from ``wrong'' cartoons given only measurements
of fold-changes in expression.
\subsubsection{Two- and Three-state Thermodynamic Models}
We begin our analysis with models 1 and 2 in
Figure~\ref{fig1:means_cartoons}(B). In each of these models the promoter
is idealized as existing in a set of discrete states; the difference being
whether or not the RNAP bound state is included or not. Gene expression is then
assumed to be proportional to the probability of the promoter being in either
the empty state (model 1) or the RNAP-bound state (model 2). We direct the
reader to Appendix~\ref{sec:non_bursty} for details on the derivation of the
fold-change. For our purposes here, it suffices to state that the functional
form of the fold-change for model 1 is
\begin{equation}
FC(R)
= \left(1 + \frac{R}{N_{NS}} e^{-\beta\Delta\varepsilon_R}\right)^{-1},
\end{equation}
where $R$ is the number of repressors per cell, $N_{NS}$ is the number of
non-specific binding sites where the repressor can bind, $\Delta\varepsilon_R$
is the repressor-operator binding energy, and $\beta \equiv (k_BT)^{-1}$. This
equation matches the form of the master curve in
Figure~\ref{fig1:means_cartoons}(D) with $\rho=1$ and $\Delta F_R =
\beta\Delta\varepsilon_R - \log (R / N_{NS})$. For model 2 we have a similar
situation. The fold-change takes the form
\begin{eqnarray}
FC(R)
&=& \left(
1 + \frac{\frac{R}{N_{NS}} e^{-\beta\Delta\varepsilon_R}}
{1 + \frac{P}{N_{NS}} e^{-\beta\Delta\varepsilon_P}}
\right)^{-1}
\\
&=& (1 + \exp(-\Delta F_R + \log\rho))^{-1},
\end{eqnarray}
where $P$ is the number of RNAP per cell, and $\Delta\varepsilon_P$ is the
RNAP-promoter binding energy. For this model we have $\Delta F_R =
\beta\Delta\varepsilon_R - \log(R/N_{NS})$ and $\rho = 1 +
\frac{P}{N_{NS}}\mathrm{e}^{-\beta\Delta\varepsilon_P}$. Thus far, we see that
the two thermodynamic models, despite making different coarse-graining
commitments, result in the same functional form for the fold-change in mean gene
expression. We now explore how kinetic models fare when faced with computing
the same observable.
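Before doing so, it is worth making the collapse onto
Eq.~\ref{eq:deltaFR_eq_noneq_equiv} explicit in the simplest case. For model
1, where $\rho = 1$, the definition $\Delta F_R = \beta\Delta\varepsilon_R -
\log(R/N_{NS})$ gives
\begin{equation}
e^{-\Delta F_R}
= \exp\left[-\beta\Delta\varepsilon_R + \log\left(\frac{R}{N_{NS}}\right)\right]
= \frac{R}{N_{NS}} e^{-\beta\Delta\varepsilon_R},
\end{equation}
which is exactly the repressor-dependent term appearing in the fold-change of
model 1 above.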
\subsubsection{Kinetic models}
One of the main differences between the models shown in
Figure~\ref{fig1:means_cartoons}(C), cast in the language of chemical master
equations, compared with the thermodynamic models discussed in the previous
section is the probability space over which they are built. Rather than keeping
track only of the microstate of the promoter, and assuming that gene expression
is proportional to the probability of the promoter being in a certain
microstate, chemical master equation models are built on the entire probability
state of both the promoter microstate, and the current mRNA count. Therefore, in
order to compute the fold-change, we must compute the mean mRNA count on each of
the promoter microstates, and add them all together~\cite{Sanchez2013}.
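In symbols, if $p(s, m)$ denotes the steady-state joint probability of
finding the promoter in microstate $s$ with $m$ mRNA molecules (a notation
introduced here only for compactness), the quantity entering the fold-change
is
\begin{equation}
\langle m \rangle = \sum_{s} \sum_{m=0}^{\infty} m \, p(s, m),
\end{equation}
i.e., the mRNA mean marginalized over all promoter microstates.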
Again, we consign all details of the derivation to
Appendix~\ref{sec:non_bursty}. Here we just highlight the general findings for
all five kinetic models. As already shown in Figure~\ref{fig1:means_cartoons}(C)
and (D), all the kinetic models explored can be collapsed onto the master curve.
Given that the repressor-bound state only connects to the rest of the promoter
dynamics via its binding and unbinding rates, $k_R^+$ and $k_R^-$ respectively,
all models can effectively be separated into two categories: a single
repressor-bound state, and all other promoter states with different levels of
coarse graining. This structure then guarantees that, at steady-state, detailed
balance between these two groups is satisfied. What this implies is that the
steady-state distribution of each of the non-repressor states has the same
functional form with or without the repressor, allowing us to write the
fold-change as a product of the ratio of the binding and unbinding rates of the
promoter, and the promoter details. This results in a fold-change of the form
\begin{eqnarray}
FC &=& \left( 1 + \frac{k_R^+}{k_R^-} \rho \right)^{-1},\\
&=& (1 + \exp(-\Delta F_R + \log(\rho) ))^{-1},
\end{eqnarray}
where $\Delta F_R \equiv -\log(k_R^+/k_R^-)$, and the functional forms of $\rho$
for each model change as shown in Figure~\ref{fig1:means_cartoons}(C). Another
intuitive way to think about these two terms is as follows: in all kinetic
models shown in Figure~\ref{fig1:means_cartoons}(C) the repressor-bound state can only be reached from a
single repressor-free state. The ratio of these two states --repressor-bound and
adjacent repressor-free state-- must remain the same for all models, regardless
of the details included in other promoter states if $\Delta F_R$ represents an
effective free energy of the repressor binding the DNA operator. The presence of
other states then draws probability density from the promoter being in either of
these two states, making the ratio between the repressor-bound state and
\textit{all} repressor-free states different. The log difference in this ratio
is given by $\log(\rho)$. Since model 1 and model 5 of Figure~\ref{fig1:means_cartoons}(C) consist of a
single repressor-free state, $\rho$ is then necessarily 1 (See
Appendix~\ref{sec:non_bursty} for further details).
The key outcome of our analysis of the models in
Figure~\ref{fig1:means_cartoons} is the existence of a master curve shown in
Figure~\ref{fig1:means_cartoons}(D) to which the fold-change predictions of all
the models collapse. This master curve is parametrized by only two effective
parameters: $\Delta F_R$, which characterizes the number of repressors and their
binding strength to the DNA, and $\rho$, which characterizes all other features
of the promoter architecture. The key assumption underpinning this result is
that no transcription occurs when a repressor is bound to its operator. Given
this outcome, i.e., the degeneracy of the different models at the level of
fold-change, a mean-based metric such as the fold-change that can be readily
measured experimentally is insufficient to discern between these different
levels of coarse-graining. The natural extension that the field has followed for
the most part is to explore higher moments of the gene expression distribution
in order to establish if those contain the key insights into the mechanistic
nature of the gene transcription process~\cite{Iyer-Biswas2009,Munsky2012}.
Following a similar trend, in the next section we extend the analysis of the
models to higher moments of the mRNA distribution as we continue to examine the
discriminatory power of these different models. | {
"alphanum_fraction": 0.7881468069,
"avg_line_length": 63.8177777778,
"ext": "tex",
"hexsha": "4261f95f6d022d181b9acb6e781cb55a9ec29a9a",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "cd3082c567168dfad12c08621976ea49d6706f89",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "RPGroup-PBoC/bursty_transcription",
"max_forks_repo_path": "doc/resubmission/section_02_means.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "cd3082c567168dfad12c08621976ea49d6706f89",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "RPGroup-PBoC/bursty_transcription",
"max_issues_repo_path": "doc/resubmission/section_02_means.tex",
"max_line_length": 80,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "cd3082c567168dfad12c08621976ea49d6706f89",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "RPGroup-PBoC/bursty_transcription",
"max_stars_repo_path": "doc/resubmission/section_02_means.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3554,
"size": 14359
} |
\section{Conclusion}
In this report we presented a way to parallelize Myers' longest common subsequence algorithm and applied various optimizations. Our results showed that our simple row-wise algorithm scales well to many processes. Our algorithms hold up against diffutils when run sequentially. Against our expectations, our dynamic priority algorithm does not scale well.
\mypar{Future work}
In our work on this algorithm we considered various ideas to increase performance or reduce memory consumption, which we unfortunately did not have the time to experiment with for our report.
As such, we currently do not read out the edit script due to the quadratic memory consumption that it requires with this algorithm. If our algorithm is adapted to use Myers' recursive approach for linear space refinement \cite{myers_anond_1986} (which calls the non-recursive algorithm), it could output the edit script with only a minor performance cost.
It is possible that using SIMD to calculate multiple cells in parallel would improve the performance further.
% Here you need to summarize what you did and why this is
% important. {\em Do not take the abstract} and put it in the past
% tense. Remember, now the reader has (hopefully) read the report, so it
% is a very different situation from the abstract. Try to highlight
% important results and say the things you really want to get across
% such as high-level statements (e.g., we believe that .... is the right
% approach to .... Even though we only considered x, the
% .... technique should be applicable ....) You can also formulate next
% steps if you want. Be brief. After the conclusions there are only the references.
| {
"alphanum_fraction": 0.7898164594,
"avg_line_length": 84.45,
"ext": "tex",
"hexsha": "29fde431fef1c530be2e8846ce32ba6b03f5d61a",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "9d6dafc9dc16dcf97b4c712dbb8c6dace25eeee5",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "tehwalris/mpi-myers-diff",
"max_forks_repo_path": "report_src/sections/05-conclusions.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "9d6dafc9dc16dcf97b4c712dbb8c6dace25eeee5",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "tehwalris/mpi-myers-diff",
"max_issues_repo_path": "report_src/sections/05-conclusions.tex",
"max_line_length": 359,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "9d6dafc9dc16dcf97b4c712dbb8c6dace25eeee5",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "tehwalris/mpi-myers-diff",
"max_stars_repo_path": "report_src/sections/05-conclusions.tex",
"max_stars_repo_stars_event_max_datetime": "2022-01-13T17:47:49.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-11-09T11:30:02.000Z",
"num_tokens": 342,
"size": 1689
} |
\section*{Introduction}
\addcontentsline{toc}{section}{Introduction}
Monte Carlo (MC) methods are an important tool for multi-dimensional integration
and they find application in many fields, including particle physics.
However, as the computations associated with them can be very expensive, parallelization is essential.
The most straightforward option is to simply run them on several CPU cores in parallel.
Another option is to use hardware accelerators such as GPUs to speed up the calculation.
This paper introduces a few fundamental MC techniques and algorithms,
explains the hardware and thread model of GPUs,
lists a few programming patterns that can be used for efficient parallelization,
and finally shows a simple benchmark of MC when run on either a CPU or GPU.
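As a minimal illustration of the simplest form of MC integration
(an illustrative Python sketch; the benchmarks in this paper use their own implementation):
\begin{verbatim}
import random

def mc_integrate(f, n_samples=100000, seed=0):
    # Plain Monte Carlo estimate of the integral of f over [0, 1]:
    # the average of f at uniformly sampled points.
    rng = random.Random(seed)
    total = 0.0
    for _ in range(n_samples):
        total += f(rng.random())
    return total / n_samples

# Example: the integral of x^2 over [0, 1] is 1/3.
print(mc_integrate(lambda x: x * x))
\end{verbatim}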
| {
"alphanum_fraction": 0.8220779221,
"avg_line_length": 59.2307692308,
"ext": "tex",
"hexsha": "2f004050ff03c466a3bd439b3803152deaa6640f",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0d43f3b70d71f063f872a5fb8b09cde3b756ee36",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "JohannesGaessler/presentation_mc",
"max_forks_repo_path": "LaTeX/Parts/01_introduction.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "0d43f3b70d71f063f872a5fb8b09cde3b756ee36",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "JohannesGaessler/presentation_mc",
"max_issues_repo_path": "LaTeX/Parts/01_introduction.tex",
"max_line_length": 101,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "0d43f3b70d71f063f872a5fb8b09cde3b756ee36",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "JohannesGaessler/presentation_mc",
"max_stars_repo_path": "LaTeX/Parts/01_introduction.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 146,
"size": 770
} |
\documentclass[natbib]{article}
\usepackage{microtype}
\usepackage{lmodern}
\usepackage{url}
\usepackage{xspace}
\usepackage{calc}
\usepackage{enumerate}
\usepackage{listings}
\usepackage{amsmath,amssymb}
\usepackage{rotating}
\usepackage{colortbl}
\usepackage{pifont}
\usepackage{tikz}
%\usetikzlibrary{shapes,shadows,arrows,calc,positioning,fit,matrix,mindmap,trees}
%\usepackage{pgfplots}
%\usepackage{pgfplotstable}
\usepackage{booktabs}
\usepackage{natbib}
\usepackage{colortbl}
\usepackage{algorithm2e}
\usepackage{syntax}
% pantone colors
% More sensible defaults akin to \sloppy
% \tolerance 1414
% \hbadness 1414
% \emergencystretch 1.5em
% \hfuzz 0.3pt
% \widowpenalty=10000
% \clubpenalty=10000
% \vfuzz
% \hfuzz
% \raggedbottom
\newcommand{\ignore}[1]{}
\newcommand{\st}{\textit{s.\,t.}\xspace}
\newcommand{\eg}{\textit{e.\,g.}\xspace}
\newcommand{\ie}{\textit{i.\,e.}\xspace}
\newcommand{\cf}{\textit{cf.}\xspace}
\newcommand{\blackarrow}{{\color{black} \Pisymbol{pzd}{217}}}
\newcommand{\redarrow}{{\color{DarkRed} \Pisymbol{pzd}{217}}}
\newcommand{\minibox}[2]{\begin{minipage}{#1}\raggedright #2\end{minipage}}
\newcommand{\enquote}[1]{``#1''}
%\newcommand{\fixme}[1]{\begin{tikzpicture}
%\node[bottom color=red!80!white, top color=red!70!black, rounded corners,
% font=\bf\color{white}\footnotesize] {
% \begin{minipage}{.75\columnwidth}
% FIXME\\
% #1
% \end{minipage}
%};
%\end{tikzpicture}
%}
\lstset{
language=C,
basicstyle=\small,%\scriptsize, %\footnotesize\ttfamily,
keywordstyle={\bf},
keywordstyle={[2]\it},%\color{Blue!40!black}},
breaklines=true,
identifierstyle=,
stringstyle=\bf,
commentstyle=\it\color{black!80},
captionpos=b,
numbers=left,
stepnumber=3,
columns=fullflexible
}
\begin{document}
\title{Typeforge User Manual\\Version 1.0.0}
\author{\small Markus Schordan, Tristan Vanderbruggen, Nathan Pinnow}
%\end{tabular}
\date{January 11, 2019}
\maketitle
%\begin{abstract}
%\noindent Typeforge is a tool for analysis and transformation of variable types in
%C/C++ programs. The main focus of development was to aid the development of
%mixed-precision programs through modification and searching of the AST.
%Typeforge does this through changing type information and inserting program
%instrumentation then outputting modified source code for the user or other tools to use.
%
%\end{abstract}
\tableofcontents
%-------------------------------------------------------------------------
\section{Introduction}
\label{sec:intro}
Typeforge is based on the ROSE compiler infrastructure\footnote{\url{http://www.rosecompiler.org/}}
and uses the ROSE abstract syntax tree as the basis for its transformations.
A main focus of Typeforge development has been its use as part of a tool pipeline designed for
the automatic generation of mixed-precision programs. For use in the pipeline,
Typeforge works with ADAPT~\cite{adapt} to insert the instrumentation needed to
perform automatic differentiation for the purpose of finding variables' error thresholds.
Typeforge was also designed to work with CRAFT~\cite{CRAFT2013PARCO,CRAFT2013ICS,CRAFT2016}
for the purpose of searching the mixed-precision configurations built by Typeforge
for the best performance.
\subsection{CRAFT-ADAPT Pipeline}\label{sec:pipeline}
\begin{figure}[h]
\centering
\includegraphics[width=\textwidth]{pipeline.png}
\caption{\textsf{Diagram of pipeline structure}}
\label{fig:pipeline}
\end{figure}
\noindent
Typeforge has several uses in the pipeline for the automatic generation of mixed-precision programs,
as seen in Figure~\ref{fig:pipeline}.
In order for CRAFT to conduct a proper search it needs a search space,
which Typeforge can define by looking for all declarations in the AST. This
list can be refined by looking at sets to avoid searching configurations that will not compile.
When CRAFT tests a configuration it passes the appropriate plugin to Typeforge so
that Typeforge can generate new source code and compile the configuration. This lets CRAFT gather
performance metrics on a configuration instead of estimates. To refine the search space ADAPT
feeds precision information to CRAFT, but for ADAPT to run, modifications need to be made to the
source code. These modifications are pragma replacement, adding includes, replacing floating-point
types with AD\_real, and adding ADAPT function calls. All of the ADAPT modifications and the variable
listing can be done with Typeforge with a single plugin, shown in section \ref{sec:expampleSpec}.
\section{Installation}
The first step is to install ROSE, which also configures Typeforge but does not install
it by default. To install Typeforge after installing ROSE, run
'make install' (and optionally 'make check') in the \verb+projects/+ \verb+typeforge+ directory.
Typeforge is installed as 'typeforge' (at the same location as other ROSE tools,
in the 'bin' directory of the ROSE installation).
\section{Command Line Options}
The command line options of Typeforge are parsed by Boost's program options
library\footnote{\url{http://www.boost.org/doc/libs/1_63_0/doc/html/program_options.html}}.
The following command line options are listed when running \verb+typeforge --help+.
The main options below comprise general parameters such as the spec file and explicit command-line
transformations. All filenames and unrecognized options will be passed directly to the ROSE compiler
as command line options.
\begin{verbatim}
typeforge <filename> [OPTIONS]
Supported Options:
-h [ --help ] Produce this help message.
-v [ --version ] Display the version of Typeforge.
--compile Run backend compiler.
--explicit Make all implicit casts explicit. This option can be
                            used to make all implicit casts visible to the user in
                            the (generated) program source.
--cast-stats Print statistics on casts of built-in
floating point types.
--stats Print statistics on changes performed on the
input program.
--trace Print program transformation operations
as they are performed.
--plugin arg Name of Typeforge plugin files.
--csv-stats-file arg Generate file [args] with transformation statistics.
--typeforge-out File to store output inside of JSON.
\end{verbatim}
\section{Plugin Definition File}
To define what actions Typeforge should perform, it uses a JSON file as the format for the plugin definition. The JSON file is generated using ToolConfig and was designed to allow other programs to create specifications for Typeforge. The ToolConfig JSON has several header string fields named executable, tool\_id, and version, and a list of strings named source\_files. These fields are not used for input to Typeforge and are intended for information tracking.

For providing the actions to Typeforge, ToolConfig has a list of ToolAction objects named actions. Each item in the actions list is a JSON object defining one action for Typeforge to take. All actions have an analysis phase and some also have an execution phase in which they modify the AST. The separation of phases allows an action to find a change and make that change later, without another action finding a change location based upon the already-changed code. If actions contradict each other, the behavior is undefined.
\subsection{Handles} \label{sec:handles}
Several actions are related to the use of compiler-generated handles. Typeforge is capable of emitting string-based
handles for the purpose of variable and function identification. These handles are guaranteed to
uniquely identify a variable or function in the program and can be used to specify it to Typeforge.
The content and format of these handles are not guaranteed and are
subject to change.
\subsection{Actions}
The grammar below defines how the plugin definition file is constructed.
The symbols "$\langle\rangle$" indicate a non-terminal, the symbol "$\mid$"
indicates an alternative, and "()*" indicates that the group may be repeated zero or more times.
All other symbols are terminals.
\begin{grammar}
<PluginDefinitionFile> ::= \{''actions'' : [<Action>(, <Action>)*]\}
<Action> ::= <ActionID>, ((<Scope>, <Name>) | <Handle>), <ToType>
\alt <ActionID>, <Scope>, <FromType>, <ToType>
\alt <ActionID>, <Scope>, <Name>
\alt <ActionID>, <Scope>,
\alt <ActionID>, <FromType>, <ToType>
<ActionID> ::= ''"action"'': ''<ActionSpecifier>''
<Scope> ::= ''"scope"'': ''<ScopeSpecifier>''
<Name> ::= ''"name"'': ''<NameSpecifier>''
<ToType> ::= ''"to_type"'': ''NewType''
<FromType> ::= ''"from_type"'': ''MatchType''
<Handle> ::= ''"handle"''":" ''CompilerHandle''
\end{grammar}
\subsubsection{Var Type Change} \label{sec:vartypechange}
This action changes the type or base type of a specified variable. Actions related to
changing types end in type or basetype, which specifies how the change should be made.
When type is used, the type is changed, and potentially matched, based on the exact type,
regardless of pointers and arrays. When basetype is used, Typeforge matches and changes types
by stripping away and then rebuilding arrays, pointers, typedefs, references, and modifiers.
Scope can be either the name of the function where the variable is located or "\$global"
for a global variable. Name can be the name of a single variable or a comma-separated
list of variables to change. A handle can be specified instead of using scope and variable name. The new type
should be the type that the variable will be changed to.
\begin{grammar}
<ActionSpecifier> ::= "change_var_type" | "change_var_basetype"
<ScopeSpecifier> ::= FunctionName | "\$global"
<NameSpecifier> ::= VariableName(,VariableName)*
\end{grammar}
\begin{verbatim}
Example:
{
"action": "change_var_basetype",
"scope": "main",
"name": "x",
"to_type": "float"
},
{
"action": "change_var_basetype",
"handle": "Project<numbering,1>::FileList<numbering,1>::SourceFile
<name,/home/src/main.C>::VariableDeclaration<position,7.1-7.15>",
"to_type": "float"
}
\end{verbatim}
\subsubsection{Change Type} \label{sec:chagetype}
This action replaces all variables of a given type with a new type. Match\_Type is the type that Typeforge
will search for and New\_Type is the type that it will replace the matched type with.
Scope specifies the locations where variables should be changed and has several forms.
Use "\$global" to replace globals; otherwise use the function name followed by a colon and
then the parts of the function to change, in a comma-separated list. Use args to change arguments,
body to change the body of the function, and ret to change the return type. The function name
can be replaced with * to change all functions. For example, to change everything in main, use
"main:args,ret,body". See section \ref{sec:vartypechange} for more information on type changing.
\begin{grammar}
<ActionSpecifier> ::= "change_every_type" | "change_every_basetype"
<ScopeSpecifier> ::= FunctionName:<VariableLocationList>
\alt "*":<VariableLocationList> | "\$global"
<VariableLocationList> ::= <VariableLocation>(, <VariableLocation>)*
<VariableLocation> ::= "args" | "ret" | "body"
\end{grammar}
\begin{verbatim}
Example:
{
"action": "change_every_type",
"scope": "*:body",
"from_type": "double",
"to_type": "AD_real"
}
\end{verbatim}
\subsubsection{Listing Changes} \label{sec:listChange}
This action outputs a list of possible replacements that have the same type as Match\_Type, without changing any types.
It outputs a JSON file with the actions set, so that when it is given back to Typeforge as a plugin it results in the changes being made.
The output list includes the scope name and the compiler handle. Scope is a function name, * for all functions,
or \$global for global variables. Scope can also be omitted for Typeforge to examine the entire program.
New\_Type is only used for writing a valid plugin definition file to the output.
See section \ref{sec:vartypechange} for more information on type changing.
\begin{grammar}
<ActionSpecifier> ::= "list_changes_type" | "list_changes_basetype"
<ScopeSpecifier> ::= FunctionName | "*" | "\$global"
\end{grammar}
\begin{verbatim}
Example:
{
"action": "list_changes_basetype",
"scope": "",
"from_type": "double",
"to_type": "float",
}
\end{verbatim}
\subsubsection{ADAPT Instrumentation}
This action inserts ADAPT function calls where appropriate. Typeforge looks for any location in the AST where a floating-point
variable is assigned to or initialized and then inserts the corresponding AD\_intermediate function
call after the assignment, with the name passed to ADAPT set to the variable's handle.
If "\#pragma adapt begin" is included in the body, instrumentation for initialized
globals is inserted immediately after the pragma. This action does not perform the type replacement with AD\_real, the inclusion
of the ADAPT headers, or the adapt pragma replacement. Scope is the function name where
instrumentation should be inserted, or "*" for all functions.
\begin{grammar}
<ActionSpecifier> ::= "ad_intermediate_instrumentation"
<ScopeSpecifier> ::= FunctionName | "*"
\end{grammar}
\begin{verbatim}
Example:
{
"action": "ad_intermediate_instrumentation",
"scope": "*"
}
\end{verbatim}
\subsubsection{Add Include}
This action adds an include directive to files in the AST. Scope is used to restrict the insertion
to files that define a specific function; use "*" for all files.
Name is the name of the file to be included.
\begin{grammar}
<ActionSpecifier> ::= "add_include"
<ScopeSpecifier> ::= FunctionName | "*"
<NameSpecifier> ::= FileName
\end{grammar}
\begin{verbatim}
Example:
{
"action": "add_include",
"scope": "main",
"name": "adapt-impl.cpp"
}
\end{verbatim}
\subsubsection{Pragma Replacement}
This action performs simple pragma replacement inside the AST. It only
replaces pragmas that begin with MatchType, excluding the \#pragma itself. NewType is what the
pragma will be replaced with. Arguments can be specified by writing \$N, where N is the argument
number, with 0 being the first token after the pragma in the source file, including the matched string.
\begin{grammar}
<ActionSpecifier> ::= "replace_pragma"
\end{grammar}
\begin{verbatim}
Example:
{
"action": "replace_pragma",
"from_type": "adapt begin",
"to_type": "AD_Begin();"
}
\end{verbatim}
\subsection{Example Spec Files} \label{sec:expampleSpec}
\subsubsection{Pipeline Initial Start}
This plugin definition file is used as the setup for the pipeline in section \ref{sec:pipeline}.
Note that there is an action to change all doubles to AD\_real and an action to
list replacements for doubles. Both can work because they are performed on the original tree.
\begin{verbatim}
{
"version": "1",
"tool_id": "Master",
"actions": [
{
"action": "replace_pragma",
"from_type": "adapt begin",
"to_type": "AD_begin();"
},
{
"action": "add_include",
"name": "adapt-impl.cpp",
"scope": "main"
},
{
"action": "transform",
"scope": "*",
"from_type": "float",
"name": "ad_intermediate_instrumentation"
},
{
"action": "change_every_basetype",
"scope": "*:args,ret,body",
"from_type": "double",
"to_type": "AD_real"
},
{
"action": "list_changes_basetype",
"scope": "",
"from_type": "double",
"to_type": "float",
"name": "outputFile.json"
}
]
}
\end{verbatim}
\subsubsection{CRAFT Configuration}
This plugin file changes the type of two specific variables to float.
One change is specified by a handle while the other is specified by function
name and variable name.
\begin{verbatim}
{
"version": "1",
"tool_id": "CRAFT",
"actions": [
{
"action": "change_var_basetype",
"handle": "Project<numbering,1>::FileList<numbering,1>::SourceFile
<name,/home/src/test.C>::VariableDeclaration<position,1.1-1.15>",
"to_type": "float"
    },
{
"action": "change_var_basetype",
"scope": "main",
"to_type": "float",
"name": "x"
}
]
}
\end{verbatim}
\section{Experimental Features} \label{analysis}
\subsection{Variable Sets}
When changing the type of a variable it is possible that the change will result in a
compilation error due to interdependent variables. This happens when variables are connected,
such as through assignment, and the types cannot simply be cast to be the same, as is the case with pointers
or arrays. As a result, every variable is part of a dependence set in which all the variables
must be changed together or the program will fail to compile. Given how these
dependence sets are defined, every variable belongs to exactly one class and the classes do not intersect.
Section \ref{sec:setAlg} shows the algorithm for set generation for variables only and
section \ref{sec:setDef} gives a definition of the sets.
\subsubsection{Sets Definition} \label{sec:setDef}
Let $V$ be the set of all variables, function parameters, and function return types.
Let $M$ be a set of variable sets showing what each variable is directly related to; each variable is associated with a single set.
Let $S$ be the resulting fully connected sets, with the following properties.
\begin{gather*}
\forall i \in V(\exists! j \in S(i \in j))\\
\forall x \in M(\exists! y \in S(x \cap y \neq \emptyset) \wedge
\exists! z \in S(x \subseteq z))
\end{gather*}
\subsubsection{Sets Algorithm} \label{sec:setAlg}
\begin{algorithm}[H]
\label{variableSetAlgo}
\SetAlgoLined
\KwResult{Dependence\_Sets}
Map$\langle$Node,Set$\langle$Node$\rangle\rangle$ Dependence\_Map;\\
List$\langle$Set$\langle$Node$\rangle\rangle$ Dependence\_Sets;\\
\For{Variable $\in$ All\_Variables}{
Dependence\_Map.Add(Variable, Variable);
}
\For{Node $\in$ All AST Nodes}{
\If{Node == Expression and Node.type == [Pointer or Array]}{
\If{Node == Assignment\_Node}{
Destination = Left Hand Side;\\
Origin = Right Hand Side;\\
\For{VarRef $\in$ All Variable References in Origin}{
Dependence\_Map.Add(VarRef, Destination);\\
Dependence\_Map.Add(Destination, VarRef);
}
}
}
}
\For{Variable\_Set $\in$ Dependence\_Map}{
Matched = Null;\\
\For{Set $\in$ Dependence\_Sets}{
\If{Variable\_Set Intersects Set}{
\eIf{Matched}{
Matched.add(Set);\\
Dependence\_Set.Remove(Set)
}{
Set.add(Variable\_Set);\\
Matched = Set;
}
}
}
\If{Not Matched}{
Dependence\_Sets.Add(Variable\_Set)
}
}
\caption{Algorithm for building variable sets}
\end{algorithm}
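For concreteness, the merging loop of the algorithm above can be transliterated as
follows (an illustrative Python sketch only; Typeforge itself operates on the ROSE AST):
\begin{verbatim}
def merge_dependence_sets(dependence_map):
    # dependence_map maps each variable to the set of variables it is
    # directly connected to (including itself).
    dependence_sets = []
    for variable_set in dependence_map.values():
        matched = None
        for s in list(dependence_sets):
            if variable_set & s:           # the sets intersect
                if matched is not None:
                    matched |= s           # fuse two previously separate sets
                    dependence_sets.remove(s)
                else:
                    s |= variable_set
                    matched = s
        if matched is None:
            dependence_sets.append(set(variable_set))
    return dependence_sets
\end{verbatim}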
\subsubsection{Set List Action}
This action functions the same as listing changes in section \ref{sec:listChange}. The difference is that instead
of listing every change as a separate action, the connected variables are part of a single action,
separated by '=='. When sent back to Typeforge as a plugin, it changes the variables as an atomic action.
\begin{grammar}
<ActionSpecifier> ::= "'set_changes_basetype'"
\end{grammar}
\begin{verbatim}
Example:
{
"action": "set_changes_basetype",
"from_type": "double",
"to_type": "float",
}
\end{verbatim}
\subsubsection{Change Variable Set Action}
This command is similar to the change variable type action in section \ref{sec:vartypechange}.
The difference is that once this command finds the variable to change, it looks up the set for
that variable and changes the entire set as an atomic action, based on a representative element.
\begin{grammar}
<ActionSpecifier> ::= change_set_basetype
\end{grammar}
\begin{verbatim}
Example:
{
"action": "change_set_basetype",
"scope": "main",
"name": "x",
"to_type": "float"
}
\end{verbatim}
\bibliographystyle{plain}
\bibliography{typeforge}
\end{document}
| {
"alphanum_fraction": 0.7189766518,
"avg_line_length": 37.2088724584,
"ext": "tex",
"hexsha": "428b3f4a01d575b1f176270e045f04af19fefb57",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "e91c0447140040a9ae02f3a58af3621a00a5e242",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "LLNL/typeforge",
"max_forks_repo_path": "docs/typeforge.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e91c0447140040a9ae02f3a58af3621a00a5e242",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "LLNL/typeforge",
"max_issues_repo_path": "docs/typeforge.tex",
"max_line_length": 993,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "e91c0447140040a9ae02f3a58af3621a00a5e242",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "LLNL/typeforge",
"max_stars_repo_path": "docs/typeforge.tex",
"max_stars_repo_stars_event_max_datetime": "2021-09-01T15:10:38.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-05-17T19:22:37.000Z",
"num_tokens": 5030,
"size": 20130
} |
\chapter{scattering theory solutions}
\begin{abox}
Practice set 1 solutions
\end{abox}
\begin{enumerate}
\begin{minipage}{\textwidth}
\item A free particle described by a plane wave and moving in the positive $z$-direction undergoes scattering by a potential
$$
V(r)= \begin{cases}V_{0}, & \text { if } r \leq R \\ 0, & \text { if } r>R\end{cases}
$$
If $V_{0}$ is changed to $2 V_{0}$, keeping $R$ fixed, then the differential scattering cross-section, in the Born approximation.
\exyear{NET JUNE 2012}
\end{minipage}
\begin{tasks}(2)
\task[\textbf{A.}]increases to four times the original value
\task[\textbf{B.}]increases to twice the original value
\task[\textbf{C.}]decreases to half the original value
\task[\textbf{D.}]decreases to one fourth the original value
\end{tasks}
\begin{answer}
$V(r)= \begin{cases}V_{0}, & r \leq R \\ 0, & r>R\end{cases}$\\
$$\text { Low energy scattering amplitude } f(\theta, \phi)=-\frac{m}{2 \pi \hbar^{2}} V_{0} \frac{4}{3} \pi R^{3}$$
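$$\text { This follows from the Born amplitude } f=-\frac{2 m}{\hbar^{2} q} \int_{0}^{\infty} r V(r) \sin (q r) d r, \text { which for } q R \ll 1 \text { gives } f \approx-\frac{2 m}{\hbar^{2}} \int_{0}^{R} V_{0} r^{2} d r=-\frac{2 m V_{0} R^{3}}{3 \hbar^{2}}=-\frac{m}{2 \pi \hbar^{2}} V_{0} \frac{4}{3} \pi R^{3}$$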
$$\text { And differential scattering is given by } \frac{d \sigma_{1}}{d \Omega}=|f|^{2}=\left(\frac{2 m V_{0} R^{3}}{3 \hbar^{2}}\right)^{2}$$
$$\text { Now } V(r)=2 V_{0} \text { for } r<R \Rightarrow \frac{d \sigma_{2}}{d \Omega}=\left(\frac{2 m\left(2 V_{0}\right) R^{3}}{3 \hbar^{2}}\right)^{2}=4\left(\frac{2 m V_{0} R^{3}}{3 \hbar^{2}}\right)^{2}=4 \frac{d \sigma_{1}}{d \Omega}$$
The correct option is \textbf{(a)}
\end{answer}
\begin{minipage}{\textwidth}
\item The differential cross-section for scattering by a target is given by
$$
\frac{d \sigma}{d \Omega}(\theta, \phi)=a^{2}+b^{2} \cos ^{2} \theta
$$
If $N$ is the flux of the incoming particles, the number of particles scattered per unit time is
\exyear{NET JUNE 2015}
\end{minipage}
\begin{tasks}(2)
\task[\textbf{A.}] $\frac{4 \pi}{3} N\left(a^{2}+b^{2}\right)$
\task[\textbf{B.}]$4 \pi N\left(a^{2}+\frac{1}{6} b^{2}\right)$
\task[\textbf{C.}]$4 \pi N\left(\frac{1}{2} a^{2}+\frac{1}{3} b^{2}\right)$
\task[\textbf{D.}]$4 \pi N\left(a^{2}+\frac{1}{3} b^{2}\right)$
\end{tasks}
\begin{answer}
$$\frac{d \sigma}{d \Omega}=a^{2}+b^{2} \cos ^{2} \theta$$
$$\sigma=a^{2} \int_{0}^{\pi} \int_{0}^{2 \pi} \sin \theta d \theta d \phi+b^{2} \int_{0}^{\pi} \cos ^{2} \theta \sin \theta d \theta \int_{0}^{2 \pi} d \phi=a^{2} .4 \pi+b^{2} .2 \pi \times \frac{2}{3}=4 \pi\left[a^{2}+\frac{b^{2}}{3}\right]$$
$$\text { Number of particle scattered per unit time, } \sigma \cdot N=4 \pi N\left(a^{2}+\frac{b^{2}}{3}\right)$$
The correct option is \textbf{(d)}
\end{answer}
\begin{minipage}{\textwidth}
\item A particle of energy $E$ scatters off a repulsive spherical potential
$$
V(r)=\left\{\begin{array}{ccc}
V_{0} & \text { for } & r<a \\
0 & \text { for } & r \geq a
\end{array}\right.
$$
where $V_{0}$ and $a$ are positive constants. In the low energy limit, the total scattering cross-section is $\sigma=4 \pi a^{2}\left(\frac{1}{k a} \tanh k a-1\right)^{2}$, where $k^{2}=\frac{2 m}{\hbar^{2}}\left(V_{0}-E\right)>0$. In the limit $V_{0} \rightarrow \infty$ the ratio of $\sigma$ to the classical scattering cross-section off a sphere of radius $a$ is
\exyear{NET JUNE 2015}
\end{minipage}
\begin{tasks}(2)
\task[\textbf{A.}] 4
\task[\textbf{B.}]3
\task[\textbf{C.}]1
\task[\textbf{D.}]$\frac{1}{2}$
\end{tasks}
\begin{answer}
$\sigma=4 \pi a^{2}\left[\frac{1}{k a} \tanh k a-1\right]^{2}$\\\\
$k a \rightarrow \infty, \tanh k a \rightarrow 1 \Rightarrow \sigma=4 \pi a^{2}\left(\frac{1}{k a}-1\right)^{2}$\\\\
and $k a \rightarrow \infty, \lim _{k a \rightarrow \infty} \sigma_{H}=4 \pi a^{2}$\\\\
classically $\sigma_{c}=\pi a^{2} \quad \therefore \frac{\sigma_{H}}{\sigma_{c}}=4$\\
The correct option is \textbf{(a)}
\end{answer}
\begin{minipage}{\textwidth}
\item A particle is scattered by a central potential $V(r)=V_{0} r e^{-\mu r}$, where $V_{0}$ and $\mu$ are positive constants. If the momentum transfer $\vec{q}$ is such that $q=|\vec{q}| \gg \mu$, the scattering cross-section in the Born approximation, as $q \rightarrow \infty$, depends on $q$ as
[You may use $\left.\int x^{n} e^{a x} d x=\frac{d^{n}}{d a^{n}} \int e^{a x} d x\right]$
\exyear{NET DEC 2016}
\end{minipage}
\begin{tasks}(2)
\task[\textbf{A.}]$q^{-8}$
\task[\textbf{B.}]$q^{-2}$
\task[\textbf{C.}]$q^{2}$
\task[\textbf{D.}]$q^{6}$
\end{tasks}
\begin{answer}
The form factor is given for high energy as $q \rightarrow \infty$ \\
\begin{align*}
&f(\theta, \phi)=\frac{-2 m}{\hbar^{2} q} \int_{0}^{\infty} r V(r) \sin q r d r=\frac{-2 m}{\hbar^{2} q} \int_{0}^{\infty} r^{2} V_{0} e^{-\mu r} \sin q r d r\\
&=\frac{-2 m}{\hbar^{2} q} V_{0} \int_{0}^{\infty} r^{2} e^{-\mu r} \frac{e^{i q r}-e^{-i q r}}{2 i} d r=\frac{m V_{0}}{\hbar^{2} q} i\left[\int_{0}^{\infty} r^{2} e^{-r(\mu-i q)} d r-\int_{0}^{\infty} r^{2} e^{-r(\mu+i q)} d r\right] \\
&=\frac{m V_{0} i}{\hbar^{2} q}\left[\frac{2!}{(\mu-i q)^{3}}-\frac{2!}{(\mu+i q)^{3}}\right]=\frac{2 m V_{0} i}{\hbar^{2} q}\left[\frac{\left((\mu+i q)^{3}-(\mu-i q)^{3}\right)}{(\mu+i q)^{3}(\mu-i q)^{3}}\right] \\
&=\frac{2 m V_{0}}{\hbar^{2} q} \frac{i\left[\left(\mu^{3}-i q^{3}+3 \mu^{2} i q-3 \mu q^{2}\right)-\left(\mu^{3}+i q^{3}-3 \mu^{2} i q-3 \mu q^{2}\right)\right]}{\left(\mu^{2}+q^{2}\right)^{3}}\\
&=\frac{2 m V_{0} i}{\hbar^{2} q}\left[\frac{6 \mu^{2} i q-2 i q^{3}}{\left(\mu^{2}+q^{2}\right)^{3}}\right]=\frac{2 m V_{0}}{\hbar^{2} q}\left[\frac{2 q^{3}-6 \mu^{2} q}{\left(\mu^{2}+q^{2}\right)^{3}}\right] \\
&\propto \frac{q^{3}}{q}\left(2-\frac{6 \mu^{2}}{q^{2}}\right) \times \frac{1}{q^{6}\left(\frac{\mu^{2}}{q^{2}}+1\right)^{3}} \propto q^{2} \times \frac{1}{q^{6}} \propto \frac{1}{q^{4}} \quad\left(\because \frac{\mu^{2}}{q^{2}}<<1\right) \\
&\sigma(\theta) \propto|f(\theta)|^{2} \propto\left(q^{-4}\right)^{2}=q^{-8}
\end{align*}
The correct option is \textbf{(a)}
\end{answer}
\begin{minipage}{\textwidth}
\item Consider the potential
$$
V(\vec{r})=\sum_{i} V_{0} a^{3} \delta^{(3)}\left(\vec{r}-\vec{r}_{i}\right)
$$
where $\vec{r}_{i}$ are the position vectors of the vertices of a cube of length $a$ centered at the origin and $V_{0}$ is a constant. If $V_{0} a^{2}<<\frac{\hbar^{2}}{m}$, the total scattering cross-section, in the lowenergy limit, is
\exyear{NET JUNE 2017}
\end{minipage}
\begin{tasks}(2)
\task[\textbf{A.}] $16 a^{2}\left(\frac{m V_{0} a^{2}}{\hbar^{2}}\right)$
\task[\textbf{B.}]$\frac{16 a^{2}}{\pi^{2}}\left(\frac{m V_{0} a^{2}}{\hbar^{2}}\right)^{2}$
\task[\textbf{C.}]$\frac{64 a^{2}}{\pi}\left(\frac{m V_{0} a^{2}}{\hbar^{2}}\right)^{2}$
\task[\textbf{D.}]$\frac{64 a^{2}}{\pi^{2}}\left(\frac{m V_{0} a^{2}}{\hbar^{2}}\right)$
\end{tasks}
\begin{answer}
\begin{align*}
V(r) &=\sum_{i} V_{0} a^{3} \delta^{3}\left(\vec{r}-\vec{r}_{i}\right) \\
&=\sum_{i} V_{0} a^{3} \delta\left(x-x_{i}\right) \delta\left(y-y_{i}\right) \delta\left(z-z_{i}\right)
\end{align*}
where $x_{i}, y_{i}, z_{i}$ are the coordinates of the 8 corners of the cube whose center is at the origin.\\\\
$
f(\theta)=-\frac{m}{2 \pi \hbar^{2}} \int V(r) d^{3} r
$
\begin{align*}
&=\frac{-m}{2 \pi \hbar^{2}} V_{0} a^{3} \int_{-\infty}^{\infty} \int \sum_{i=1}^{8} \delta\left(x-x_{i}\right) \delta\left(y-y_{i}\right) \delta\left(z-z_{i}\right) d x d y d z \\
&=\frac{-m}{2 \pi \hbar^{2}} V_{0} a^{3}[1+1+1+1+1+1+1+1] \\
&=\frac{-8 m V_{0} a^{3}}{2 \pi \hbar^{2}}=\frac{-4 m V_{0} a^{3}}{\pi \hbar^{2}}
\end{align*}
$\text { total scattering cross section } \sigma=\int|f(\theta)|^{2} \sin \theta d \theta d \phi$\\
$\text { Differential scattering cross section } D(\theta)=|f(\theta)|^{2}=\frac{16 m^{2} V_{0}^{2} a^{6}}{\pi^{2} \hbar^{4}}$\\
\begin{align*}
\sigma&=\frac{16 m^{2} V_{0}^{2} a^{6}}{\pi^{2} \hbar^{4}} \cdot 4 \pi=\frac{64 a^{2}}{\pi}\left(\frac{m^{2} V_{0}^{2} a^{4}}{\hbar^{4}}\right) \\
&=\frac{64 a^{2}}{\pi}\left(\frac{m V_{0} a^{2}}{\hbar^{2}}\right)^{2}
\end{align*}
The correct option is \textbf{(c)}
\end{answer}
\begin{minipage}{\textwidth}
\item A phase shift of $30^{\circ}$ is observed when a beam of particles of energy $0.1 \mathrm{MeV}$ is scattered by a target. When the beam energy is changed, the observed phase shift is $60^{\circ}$. Assuming that only $s$-wave scattering is relevant and that the cross-section does not change with energy, the beam energy is
\exyear{NET DEC 2017}
\end{minipage}
\begin{tasks}(2)
\task[\textbf{A.}] $0.4 \mathrm{MeV}$
\task[\textbf{B.}]$0.3 \mathrm{MeV}$
\task[\textbf{C.}]$0.2 \mathrm{MeV}$
\task[\textbf{D.}]$0.15 \mathrm{MeV}$
\end{tasks}
\begin{answer}
$\sigma=\frac{4 \pi}{k^{2}} \sum_{l=0}^{\infty}(2 l+1) \sin ^{2}\left(\delta_{l}\right)$\\
only $s$-wave scattering is relevant $l=0$
$$
k=\sqrt{\frac{2 m E}{\hbar^{2}}}
$$
$$\sigma=\frac{4 \pi}{k^{2}} \sin ^{2} \delta_{0}=\frac{4 \pi \hbar^{2}}{2 m E} \sin ^{2} \delta_{0}$$
$\text { According to problem } \frac{\sin ^{2} 30}{0.1 \mathrm{MeV}}=\frac{\sin ^{2} 60}{E} \Rightarrow E=\frac{\sin ^{2} 60}{\sin ^{2} 30} \times 0.1 \mathrm{MeV}=0.3 \mathrm{MeV}$\\
The correct option is \textbf{(b)}
\end{answer}
\begin{minipage}{\textwidth}
\item The differential scattering cross-section $\frac{d \sigma}{d \Omega}$ for the central potential $V(r)=\frac{\beta}{r} e^{-\mu r}$, where $\beta$ and $\mu$ are positive constants, is calculated in thee first Born approximation. Its dependence on the scattering angle $\theta$ is proportional to ( $A$ is a constant below)
\exyear{NET JUNE 2018}
\end{minipage}
\begin{tasks}(2)
\task[\textbf{A.}] $\left(A^{2}+\sin ^{2} \frac{\theta}{2}\right)$
\task[\textbf{B.}]$\left(A^{2}+\sin ^{2} \frac{\theta}{2}\right)^{-1}$
\task[\textbf{C.}]$\left(A^{2}+\sin ^{2} \frac{\theta}{2}\right)^{-2}$
\task[\textbf{D.}]$\left(A^{2}+\sin ^{2} \frac{\theta}{2}\right)^{2}$
\end{tasks}
\begin{answer}
$f(\theta) \propto \int_{0}^{\infty} V(r) \sin k r d r \Rightarrow D(\theta) \propto|f(\theta)|^{2}$
\begin{align*}
&f(\theta) \propto \frac{1}{k} \int_{0}^{\infty} \beta \frac{e^{-\mu r}}{r} r \sin k r d r \\
&f(\theta) \propto \frac{1}{k} \int_{0}^{\infty} \frac{e^{-\mu r}}{r} r\left(\frac{e^{i k r}-e^{-i k r}}{2 i}\right) d r \Rightarrow \frac{1}{2 i k} \int_{0}^{\infty} e^{-\mu r} e^{i k r} d r-\int_{0}^{\infty} e^{-\mu r} e^{-i k r} d r \\
&\Rightarrow \frac{1}{2 i k}\left(\int_{0}^{\infty} e^{-r(\mu-i k)} d r-\int e^{-r(\mu+i k r)} d r\right) \Rightarrow \frac{1}{2 i k}\left[\frac{\mu+i k-\mu+i k}{\mu^{2}+k^{2}}\right]=\frac{2 i k}{2 i k}\left(\mu^{2}+k^{2}\right)^{-1} \\
&f(\theta) \propto \frac{1}{\left(\mu^{2}+k^{2}\right)}, \quad D(\theta)=\left(\frac{1}{\mu^{2}+k^{2}}\right)^{2}
\end{align*}
$D(\theta)=\left(\mu^{2}+k^{2}\right)^{-2}$, where $k \propto \sin \frac{\theta}{2}$\\
$D(\theta) \propto\left(\mu^{2}+\sin ^{2} \frac{\theta}{2}\right)^{-2}$ or $D(\theta)=\left(A^{2}+\sin ^{2} \frac{\theta}{2}\right)^{-2}$\\
The correct option is \textbf{(c)}
\end{answer}
\end{enumerate} | {
"alphanum_fraction": 0.5907587727,
"avg_line_length": 62.9248554913,
"ext": "tex",
"hexsha": "2a8c1e38d21803e1539173b3fd34d790946b2b56",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "689cff91895fec36b4bb0add178f13a0f68648ab",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "archives-futuring/CSIR-Physics-Study-Material",
"max_forks_repo_path": "QM -CSIR/chapter/scattering theory solutions.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "689cff91895fec36b4bb0add178f13a0f68648ab",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "archives-futuring/CSIR-Physics-Study-Material",
"max_issues_repo_path": "QM -CSIR/chapter/scattering theory solutions.tex",
"max_line_length": 361,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "689cff91895fec36b4bb0add178f13a0f68648ab",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "archives-futuring/CSIR-Physics-Study-Material",
"max_stars_repo_path": "QM -CSIR/chapter/scattering theory solutions.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4746,
"size": 10886
} |
\section{Classes} % (fold)
\label{sec:classes}
\begin{frame}\frametitle{Defining our own objects}
\framesubtitle{}
So far, we have seen many objects in the course that come standard with Python.
\begin{itemize}
\item Integers
\item Strings
\item Lists
\item Dictionaries
\item etc
\end{itemize}
\pause
But often one wants to build (much) more complicated structures.
\end{frame}
\begin{frame}\frametitle{Hangman example}
Objects:
\begin{itemize}
\item Game
\item Agents (different versions)
\end{itemize}
\end{frame}
% \begin{frame}\frametitle{Consider `building' a house in Python}
% \framesubtitle{Analogy}
% Suppose you have a program that needs to store all information about houses.
% How are we storing all information about this house?
% \pause
% \begin{itemize}
% \item A house might be a list with two elements, one for rooms, one for construction information
% \item house = [\{bathroom: ..., kitchen: ...\}, [brick, wood, ...]]
% \pause
% \item For the rooms we might again want to know about what's in the room, what it's made off
% \item So bathroom = [materials, bathtub, sink], where materials is a list
% \end{itemize}
% \pause
% We get a terribly nested structure, impossible to handle!
% \end{frame}
% \begin{frame}\frametitle{Object Oriented Programming}
% \framesubtitle{}
% Construct our own objects
% \begin{itemize}
% \item House
% \item Room
% \item etc
% \end{itemize}
% \pause\vfill
% \begin{itemize}
% \item Structure in familiar form
% \item Much easier to understand
% \end{itemize}
% \end{frame}
\begin{frame}\frametitle{Object Oriented Programming}
\framesubtitle{}
Express computation in terms of objects, which are instances of classes
\begin{description}
\item[Class] Blueprint (only one)
\item[Object] Instance (many)
\end{description}
\pause\vfill
Classes specify attributes (data) and methods to interact with the
attributes.
\end{frame}
\begin{frame}\frametitle{Python's way}
\framesubtitle{The simple way}
In languages such as C++ and Java: data protection with
private and public attributes and methods.
\vfill
Not in Python: only basics such as inheritance.
\vfill
Don't abuse power: works well in practice and leads
to simple code.
\end{frame}
\begin{frame}\frametitle{Simplest example}
\framesubtitle{Finally some code}
\codeblock{code/classes_leaf.py}
\end{frame}
\begin{frame}[fragile]\frametitle{Initializing an object}
\framesubtitle{Constructor}
Define how a class is instantiated by defining the
\verb|__init__| \textit{method}.
\vfill
Seasoned programmer: in Python only one constructor
method.
\end{frame}
\begin{frame}\frametitle{Initializing an object}
\framesubtitle{An example}
The init or \textit{constructor} \textit{method}.
\codeblock{code/classes_init.py}
Note how we \textit{access} object \textit{attributes}.
\end{frame}
\begin{frame}\frametitle{Self}
\framesubtitle{}
The \texttt{self} parameter seems strange at first sight.
\vfill
It refers to the object (instance) itself.
\vfill
Hence \texttt{self.color = color} sets the color of the object
\texttt{self.color} equal to the variable \texttt{color}.
\vfill
\end{frame}
\begin{frame}\frametitle{Another example}
\framesubtitle{}
Classes have \textit{methods} (similar to functions)
\codeblock{code/classes_stocks.py}
\pause
Recall: \textit{list.append()} or \textit{dict.items()}.
These are simply class methods!
\end{frame}
\begin{frame}\frametitle{Class attributes}
\framesubtitle{An example}
\codeblock{code/classes_class_attribute.py}
Class attributes are shared among all objects of that class.
\end{frame}
\begin{frame}\frametitle{Class hierarchy through inheritance}
It can be useful (especially in larger projects) to have a hierarchy of classes.
Example
\begin{itemize}
\item Animal
\begin{itemize}
\item Bird
\begin{itemize}
\item Hawk
\item Seagull
\item ...
\end{itemize}
\item Pet
\begin{itemize}
\item Dog
\item Cat
\item ...
\end{itemize}
\item ...
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}\frametitle{Inheritance}
Suppose we first define an abstract class
\codeblock{code/classes_animal.py}
\end{frame}
\begin{frame}\frametitle{Inheritance}
\framesubtitle{Dog}
We can define sub classes and inherit from another class.
\codeblock{code/classes_dog.py}
\end{frame}
% \begin{frame}\frametitle{Why inheritance}
% An abstract class that implements general functionality can be combined with
% several subclasses that implement details.
% \pause\vfill
% Example: statistical package with a general \textit{Model} class.
% Subclasses can include \textit{Linear regression}, \textit{Logistic regression}, etc.
% \end{frame}
\begin{frame}[fragile]\frametitle{Base methods}
\framesubtitle{}
Some methods to override
\begin{itemize}
\item \verb|__init__|: Constructor
\item \verb|__repr__|: Represent the object (machine)
\item \verb|__str__|: Represent the object (human)
\item \verb|__cmp__|: Compare
\end{itemize}
\end{frame}
% \begin{frame}[fragile]\frametitle{More useful methods}
% \framesubtitle{}
% Some more on this later!
% \begin{itemize}
% \item \verb|__contains__| for the \texttt{in} keyword
% \item \verb|__iter__| and \verb|next| for iterators
% \end{itemize}
% \end{frame}
\begin{frame}\frametitle{Example}
\framesubtitle{Rational numbers}
Implementing Rational numbers
\vfill
\codeblock{code/classes_rat.py}
\end{frame}
\begin{frame}\frametitle{Setup}
\framesubtitle{}
What information should the class hold?
\pause
\begin{itemize}
\item Numerator
\item Denominator
\end{itemize}
\end{frame}
\begin{frame}[fragile]\frametitle{Init}
\framesubtitle{Let's start coding}
Implement the \verb|__init__| method
\pause\vfill
\codeblock{code/classes_rat_init0.py}
\end{frame}
\begin{frame}\frametitle{Issues}
\framesubtitle{}
Issues?
\codeblock{code/classes_rat_init0.py}
\pause\vfill
Ignore the division by 0 for now, more on that later.
\end{frame}
\begin{frame}\frametitle{Greatest common divisor}
\framesubtitle{}
$\frac{10}{20}$ and $\frac{1}{2}$ are
the same rational.
\vfill
Implement a \texttt{gcd(a, b)} function that computes the greatest common
divisor of $a$ and $b$.
\vfill
\codeblock{code/classes_rat_gcd.py}
Exercise: Verify Euclidean Algorithm
\end{frame}
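\begin{frame}[fragile]\frametitle{Greatest common divisor}
\framesubtitle{A possible sketch}
One way to write it (the version in \texttt{code/classes\_rat\_gcd.py}
may differ):
\begin{verbatim}
def gcd(a, b):
    # Euclidean algorithm: gcd(a, b) == gcd(b, a % b).
    while b != 0:
        a, b = b, a % b
    return a

print(gcd(10, 20))   # 10
\end{verbatim}
\end{frame}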
\begin{frame}\frametitle{Greatest common divisor}
\framesubtitle{Solution}
\codeblock{code/classes_rat_init.py}
\vfill
Why is this awesome?
\end{frame}
\begin{frame}\frametitle{Representing your class: Operator overloading}
Implement \texttt{\_\_repr\_\_} or \texttt{\_\_str\_\_}
early to \texttt{print}
\vfill
Debugging
\end{frame}
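\begin{frame}[fragile]\frametitle{Representing your class: Operator overloading}
\framesubtitle{A possible sketch}
One way this could look for \texttt{Rational} (names as used on the
surrounding slides; the course code may differ):
\begin{verbatim}
class Rational(object):
    def __init__(self, num, den):
        self.num = num
        self.den = den

    def __repr__(self):
        # Called by print and by the interactive interpreter.
        return "%d/%d" % (self.num, self.den)

print(Rational(1, 3))   # prints 1/3
\end{verbatim}
\end{frame}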
\begin{frame}[fragile]\frametitle{Operator overloading: adding two Rationals}
\framesubtitle{}
Add Rationals just like Ints and Doubles?\\
\verb|Rational(10,2) + Rational(4,3)|
To use \texttt{+}, we implement the \texttt{\_\_add\_\_} method
\vfill
\codeblock{code/classes_rat_add.py}
\end{frame}
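\begin{frame}[fragile]\frametitle{Operator overloading: adding two Rationals}
\framesubtitle{A possible sketch}
A minimal sketch of \verb|__add__| (the version in
\texttt{code/classes\_rat\_add.py} may differ; reduction by the gcd is
omitted here):
\begin{verbatim}
class Rational(object):
    def __init__(self, num, den):
        self.num = num
        self.den = den

    def __add__(self, other):
        # a/b + c/d = (a*d + c*b) / (b*d)
        return Rational(self.num * other.den + other.num * self.den,
                        self.den * other.den)

r = Rational(1, 2) + Rational(1, 3)
print(r.num)   # 5
print(r.den)   # 6
\end{verbatim}
\end{frame}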
\begin{frame}[fragile]\frametitle{Operator overloading: Comparing}
\framesubtitle{}
\verb|__cmp__| compares objects
\begin{itemize}
\item If \texttt{self} is smaller than \texttt{other}, return a negative value
\item If \texttt{self} and \texttt{other} are equal, return 0
\item If \texttt{self} is larger than \texttt{other}, return a positive value
\end{itemize}
\end{frame}
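\begin{frame}[fragile]\frametitle{Operator overloading: Comparing}
\framesubtitle{A possible sketch}
For \texttt{Rational}, one way to satisfy this contract (Python~2, where
\verb|__cmp__| is used; denominators assumed positive):
\begin{verbatim}
class Rational(object):
    def __init__(self, num, den):
        self.num = num
        self.den = den

    def __cmp__(self, other):
        # a/b ? c/d  <=>  a*d ? c*b   (for positive denominators)
        return self.num * other.den - other.num * self.den

print(Rational(1, 2) < Rational(2, 3))   # True
\end{verbatim}
\end{frame}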
\begin{frame}\frametitle{More on Operator Overloading}
To learn more:
\vfill
Google `Python operator overloading'.
\end{frame}
% section classes (end)
| {
"alphanum_fraction": 0.654913023,
"avg_line_length": 22.3894736842,
"ext": "tex",
"hexsha": "52e91d6682796c535231bd166292a6e59bfca31e",
"lang": "TeX",
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2019-05-13T07:36:06.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-04-24T03:31:02.000Z",
"max_forks_repo_head_hexsha": "84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "naskoch/python_course",
"max_forks_repo_path": "lectures/tex/classes.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "naskoch/python_course",
"max_issues_repo_path": "lectures/tex/classes.tex",
"max_line_length": 106,
"max_stars_count": 4,
"max_stars_repo_head_hexsha": "84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "naskoch/python_course",
"max_stars_repo_path": "lectures/tex/classes.tex",
"max_stars_repo_stars_event_max_datetime": "2020-04-18T21:09:03.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-08-10T17:46:55.000Z",
"num_tokens": 2316,
"size": 8508
} |
% $Id$
\subsubsection{Restrictions and Future Work}
\begin{enumerate}
\label{XGrid:rest}
\item {\bf CAUTION:} The XGrid class and its APIs are only tested in a
uni-processor setup; however, in principle they should also work in a multi-processor setup.
This limitation will be removed in a future release.
\item A more convenient {\tt ESMF\_XGridCreate()} API, which will not require
the user to supply the interpolation matrix, will be provided in the future.
\end{enumerate}
| {
"alphanum_fraction": 0.7677419355,
"avg_line_length": 25.8333333333,
"ext": "tex",
"hexsha": "96b55b7015ec575112da0683b1f1027bb596faf1",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "dc0df81474587923735b1f075d26c47fc956d661",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "sdeastham/GCHP_v11-02c_Paper",
"max_forks_repo_path": "ESMF/src/Infrastructure/XGrid/doc/XGrid_rest.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "dc0df81474587923735b1f075d26c47fc956d661",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "sdeastham/GCHP_v11-02c_Paper",
"max_issues_repo_path": "ESMF/src/Infrastructure/XGrid/doc/XGrid_rest.tex",
"max_line_length": 79,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "dc0df81474587923735b1f075d26c47fc956d661",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "sdeastham/GCHP_v11-02c_Paper",
"max_stars_repo_path": "ESMF/src/Infrastructure/XGrid/doc/XGrid_rest.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 114,
"size": 465
} |
\chapter{Imputing missing data for inference}
| {
"alphanum_fraction": 0.7916666667,
"avg_line_length": 12,
"ext": "tex",
"hexsha": "f31c4caddf0ea63cfd166aabcf4195832a15ec7e",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "adamdboult/nodeHomePage",
"max_forks_repo_path": "src/pug/theory/statistics/missingInference/00-00-Chapter_name.tex",
"max_issues_count": 6,
"max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "adamdboult/nodeHomePage",
"max_issues_repo_path": "src/pug/theory/statistics/missingInference/00-00-Chapter_name.tex",
"max_line_length": 45,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "adamdboult/nodeHomePage",
"max_stars_repo_path": "src/pug/theory/statistics/missingInference/00-00-Chapter_name.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 12,
"size": 48
} |
%
% %CopyrightBegin%
%
% Copyright Ericsson AB 2017. All Rights Reserved.
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%
% %CopyrightEnd%
%
\chapter{Implementation constants}
This appendix summarizes the constants that characterize a \StdErlang\ implementation
and requirements on the values for these constants. An implementation should
document the values, most of which are also available to programs.
\section*{Atoms}
The constant
\I{maxatomlength} (\S\ref{section:atoms}) must be at least $2^8-1$ ($255$)
and at most $2^{16}-1$ ($65\,535$).
The constant $\mathit{atom\_table\_size}$ gives
the size of the atom tables used when transforming terms to and from
the external representation (\S\ref{section:atom-tables}).
It must be at least $2^8$ ($256$).
\section*{Integers}
The integers in an implementation of \StdErlang\ are characterized by
five constants $\mathit{bounded}$, $\mathit{maxint}$, $\mathit{minint}$,
$\mathit{minfixnum}$ and $\mathit{maxfixnum}$
(\S\ref{section:integer-type}):
\begin{itemize}
\item There is no requirement on the constant $\mathit{bounded}$.
\item The constant $\mathit{maxint}$ is only relevant if $\it{bounded}=\B{true}$,
in which case $\mathit{maxint}$ must be at least $2^{59}-1$ (576\,460\,752\,303\,423\,487).
\item The only requirement on the constant $\mathit{maxfixnum}$ is the obvious
condition that $0 < \I{maxfixnum} \leq \I{maxint}$.
\item Either
$\I{minint} = -(\I{maxint}+1)$ or $\I{minint} = -\I{maxint}$ must hold.
\end{itemize}
\section*{Floats}
The floating-point numbers in an implementation of \StdErlang\ are characterized by
five constants $r$, $p$, $\mathit{emin}$, $\mathit{emax}$ and $\mathit{denorm}$
(\S\ref{section:float-type}):
\begin{itemize}
\item The radix $r$ should be even.
\item The precision $p$ should be such that $r^{p-1}\geq 10^6$.
\item For the constants $\mathit{emin}$ and $\mathit{emax}$ it should hold that
$\mathit{emin}-1 \leq k*(p-1)$ and $\mathit{emax} > k*(p-1)$,
with $k\geq 2$ and $k$ as large an integer as practical, and that
$-2 \leq (emin-1) + emax \leq 2$.
\item There is no requirement on the constant $\mathit{denorm}$.
\end{itemize}
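As a non-normative illustration, an implementation whose floating-point numbers
are IEEE 754 binary64 values has $r=2$ and $p=53$, so $r^{p-1}=2^{52}\geq 10^6$.
Assuming the exponent convention in which the significand lies in $[1/r,1)$,
such an implementation has $\mathit{emin}=-1021$ and $\mathit{emax}=1024$, so that
$(\mathit{emin}-1)+\mathit{emax}=2$ and $\mathit{emax}>k*(p-1)$ for every $k\leq 19$,
satisfying the requirements above.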
\section*{Refs}
The refs in an implementation are characterized by two constants:
\I{refs\_bounded} and \I{maxrefs} (\S\ref{section:refs}).
If \I{refs\_bounded} is \B{true},
then the value of \I{maxrefs} must be at least XXX.
\section*{PIDs}
The PIDs in an implementation are characterized by two constants:
\I{pids\_bounded} and \I{maxpids} (\S\ref{section:pids}).
If \I{pids\_bounded} is \B{true},
then the value of \I{maxpids} must be at least XXX.
\section*{Ports}
The ports in an implementation are characterized by two constants:
\I{ports\_bounded} and \I{maxports} (\S\ref{section:ports}).
If \I{ports\_bounded} is \B{true},
then the value of \I{maxports} must be at least XXX.
\section*{Tuples}
The constant \I{maxtuplesize} (\S\ref{section:tuples})
must be at least $2^{16}-1$ ($65\,535$) and at most
$2^{32}-1$ ($4\,294\,967\,295$).
\section*{Scheduling}
The constant \I{normal\_advantage} (\S\ref{section:scheduling})
should be between $4$ and $32$.
| {
"alphanum_fraction": 0.7184734513,
"avg_line_length": 36.16,
"ext": "tex",
"hexsha": "012ec34445e6ff95c3275081052e8625f5efd774",
"lang": "TeX",
"max_forks_count": 12,
"max_forks_repo_forks_event_max_datetime": "2022-03-06T06:37:00.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-11-30T12:07:16.000Z",
"max_forks_repo_head_hexsha": "0d70db4d904c45678cb46de8f0f0f93eb35c66f3",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "LaudateCorpus1/spec",
"max_forks_repo_path": "src/es-limits.tex",
"max_issues_count": 3,
"max_issues_repo_head_hexsha": "0d70db4d904c45678cb46de8f0f0f93eb35c66f3",
"max_issues_repo_issues_event_max_datetime": "2018-10-05T13:50:58.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-11-30T14:08:26.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "LaudateCorpus1/spec",
"max_issues_repo_path": "src/es-limits.tex",
"max_line_length": 91,
"max_stars_count": 44,
"max_stars_repo_head_hexsha": "0d70db4d904c45678cb46de8f0f0f93eb35c66f3",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "LaudateCorpus1/spec",
"max_stars_repo_path": "src/es-limits.tex",
"max_stars_repo_stars_event_max_datetime": "2021-12-27T16:15:05.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-11-30T12:10:20.000Z",
"num_tokens": 1116,
"size": 3616
} |
\newpage{}
\hypertarget{a003---10-categories}{%
\section{A003 - 10 categories}\label{a003---10-categories}}
\hypertarget{description}{%
\subsection{Description}}
To do better than A001 we could use a categorization.
For that we divide all tasks into 10 groups of similar tasks.
Our target here is to reduce the dispersion within those groups. E.g., we know that there are a lot of tasks with a duration below half an hour, yet A001 often claims they are more costly. If we could clearly say that a task belongs to that category, we could reduce our error margin here.
But how can we accomplish that?
\subsubsection{A003.1}
We divide our tasks using a linear scale
\begin{enumerate}
\tightlist
\item smaller than 30 minutes,
\item bigger than 30 minutes and less than one hour,
\item bigger than 1 hour and less than 2 hours
\item bigger than 2 hours and less than 3 hours
\item bigger than 3 hours and less than 4 hours
\item bigger than 4 hours and less than 5 hours
\item bigger than 5 hours and less than 6 hours
\item bigger than 6 hours and less than 7 hours
\item bigger than 7 hours and less than 8 hours
\item bigger than 8 hours
\end{enumerate}
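A possible sketch of this bucketing (illustrative Python; the handling of the exact
boundary values is an assumption, since the list above does not specify it):
\begin{verbatim}
def duration_category(minutes):
    # Map a task duration in minutes to one of the 10 categories above.
    if minutes < 30:
        return 1
    if minutes < 60:
        return 2
    hours = minutes / 60.0
    if hours >= 8:
        return 10
    return int(hours) + 2   # 1-2h -> 3, 2-3h -> 4, ..., 7-8h -> 9
\end{verbatim}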
Our algorithm tries to identify significant words that are unique per category:
\begin{enumerate}
\tightlist
\item collect all words in a category
\item for all categories:
\begin{enumerate}
\tightlist
\item remove all words that are also mentioned in any other category
\end{enumerate}
\end{enumerate} | {
"alphanum_fraction": 0.7669376694,
"avg_line_length": 36.9,
"ext": "tex",
"hexsha": "810e4554297dbc3a4c75e0819293bf87a752745c",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "4f63d75dd56f56c05d9a046b98f21cff04971a08",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "stho32/Automatically-Estimating-Task-Durations",
"max_forks_repo_path": "Documentation/10000-_Algorithms/A003/index.tex",
"max_issues_count": 52,
"max_issues_repo_head_hexsha": "4f63d75dd56f56c05d9a046b98f21cff04971a08",
"max_issues_repo_issues_event_max_datetime": "2021-09-26T10:01:19.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-08-13T00:24:46.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "stho32/Automatically-Estimating-Task-Durations",
"max_issues_repo_path": "Documentation/10000-_Algorithms/A003/index.tex",
"max_line_length": 281,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "4f63d75dd56f56c05d9a046b98f21cff04971a08",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "stho32/Automatically-Estimating-Task-Durations",
"max_stars_repo_path": "Documentation/10000-_Algorithms/A003/index.tex",
"max_stars_repo_stars_event_max_datetime": "2021-09-22T06:43:27.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-09-12T17:24:38.000Z",
"num_tokens": 373,
"size": 1476
} |
\chapter{Introduction}
\input{abstract.tex}
\begin{description}
\item[Straightforward:] Virtio devices use normal bus mechanisms of
interrupts and DMA which should be familiar to any device driver
author. There is no exotic page-flipping or COW mechanism: it's just
a normal device.\footnote{This lack of page-sharing implies that the implementation of the
device (e.g. the hypervisor or host) needs full access to the
guest memory. Communication with untrusted parties (i.e.
inter-guest communication) requires copying.
}
\item[Efficient:] Virtio devices consist of rings of descriptors
for both input and output, which are neatly laid out to avoid cache
effects from both driver and device writing to the same cache
lines.
\item[Standard:] Virtio makes no assumptions about the environment in which
  it operates, beyond supporting the bus to which the device is attached.
In this specification, virtio
devices are implemented over MMIO, Channel I/O and PCI bus transports
\footnote{The Linux implementation further separates the virtio
transport code from the specific virtio drivers: these drivers are shared
between different transports.
}; earlier drafts
have been implemented on other buses not included here.
\item[Extensible:] Virtio devices contain feature bits which are
acknowledged by the guest operating system during device setup.
This allows forwards and backwards compatibility: the device
offers all the features it knows about, and the driver
acknowledges those it understands and wishes to use.
\end{description}
\section{Normative References}\label{sec:Normative References}
\begin{longtable}{l p{5in}}
\phantomsection\label{intro:rfc2119}\textbf{[RFC2119]} &
Bradner S., ``Key words for use in RFCs to Indicate Requirement
Levels'', BCP 14, RFC 2119, March 1997. \newline\url{http://www.ietf.org/rfc/rfc2119.txt}\\
\phantomsection\label{intro:rfc4122}\textbf{[RFC4122]} &
Leach, P., Mealling, M., and R. Salz, ``A Universally Unique
IDentifier (UUID) URN Namespace'', RFC 4122, DOI 10.17487/RFC4122,
July 2005. \newline\url{http://www.ietf.org/rfc/rfc4122.txt}\\
\phantomsection\label{intro:S390 PoP}\textbf{[S390 PoP]} & z/Architecture Principles of Operation, IBM Publication SA22-7832, \newline\url{http://publibfi.boulder.ibm.com/epubs/pdf/dz9zr009.pdf}, and any future revisions\\
\phantomsection\label{intro:S390 Common I/O}\textbf{[S390
Common I/O]} & ESA/390 Common I/O-Device and Self-Description, IBM Publication SA22-7204, \newline\url{https://www.ibm.com/resources/publications/OutputPubsDetails?PubID=SA22720401}, and any future revisions\\
\phantomsection\label{intro:PCI}\textbf{[PCI]} &
Conventional PCI Specifications,
\newline\url{http://www.pcisig.com/specifications/conventional/},
PCI-SIG\\
\phantomsection\label{intro:PCIe}\textbf{[PCIe]} &
PCI Express Specifications
\newline\url{http://www.pcisig.com/specifications/pciexpress/},
PCI-SIG\\
\phantomsection\label{intro:IEEE 802}\textbf{[IEEE 802]} &
IEEE Standard for Local and Metropolitan Area Networks: Overview and Architecture,
\newline\url{http://www.ieee802.org/},
IEEE\\
\phantomsection\label{intro:SAM}\textbf{[SAM]} &
SCSI Architectural Model,
\newline\url{http://www.t10.org/cgi-bin/ac.pl?t=f&f=sam4r05.pdf}\\
\phantomsection\label{intro:SCSI MMC}\textbf{[SCSI MMC]} &
SCSI Multimedia Commands,
\newline\url{http://www.t10.org/cgi-bin/ac.pl?t=f&f=mmc6r00.pdf}\\
\phantomsection\label{intro:FUSE}\textbf{[FUSE]} &
Linux FUSE interface,
\newline\url{https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/uapi/linux/fuse.h}\\
\phantomsection\label{intro:eMMC}\textbf{[eMMC]} &
eMMC Electrical Standard (5.1), JESD84-B51,
\newline\url{http://www.jedec.org/sites/default/files/docs/JESD84-B51.pdf}\\
\phantomsection\label{intro:HDA}\textbf{[HDA]} &
High Definition Audio Specification,
\newline\url{https://www.intel.com/content/dam/www/public/us/en/documents/product-specifications/high-definition-audio-specification.pdf}\\
\phantomsection\label{intro:I2C}\textbf{[I2C]} &
I2C-bus specification and user manual,
\newline\url{https://www.nxp.com/docs/en/user-guide/UM10204.pdf}\\
\phantomsection\label{intro:SCMI}\textbf{[SCMI]} &
Arm System Control and Management Interface, DEN0056,
\newline\url{https://developer.arm.com/docs/den0056/c}, version C and any future revisions\\
\end{longtable}
\section{Non-Normative References}
\begin{longtable}{l p{5in}}
\phantomsection\label{intro:Virtio PCI Draft}\textbf{[Virtio PCI Draft]} &
Virtio PCI Draft Specification
\newline\url{http://ozlabs.org/~rusty/virtio-spec/virtio-0.9.5.pdf}\\
\end{longtable}
\section{Terminology}\label{Terminology}
The key words ``MUST'', ``MUST NOT'', ``REQUIRED'', ``SHALL'', ``SHALL NOT'', ``SHOULD'', ``SHOULD NOT'', ``RECOMMENDED'', ``MAY'', and ``OPTIONAL'' in this document are to be interpreted as described in \hyperref[intro:rfc2119]{[RFC2119]}.
\subsection{Legacy Interface: Terminology}\label{intro:Legacy
Interface: Terminology}
Specification drafts preceding version 1.0 of this specification
(e.g. see \hyperref[intro:Virtio PCI Draft]{[Virtio PCI Draft]})
defined a similar, but different
interface between the driver and the device.
Since these are widely deployed, this specification
accommodates OPTIONAL features to simplify transition
from these earlier draft interfaces.
Specifically devices and drivers MAY support:
\begin{description}
\item[Legacy Interface]
is an interface specified by an earlier draft of this specification
(before 1.0)
\item[Legacy Device]
is a device implemented before this specification was released,
and implementing a legacy interface on the host side
\item[Legacy Driver]
is a driver implemented before this specification was released,
and implementing a legacy interface on the guest side
\end{description}
Legacy devices and legacy drivers are not compliant with this
specification.
To simplify transition from these earlier draft interfaces,
a device MAY implement:
\begin{description}
\item[Transitional Device]
a device supporting both drivers conforming to this
specification, and allowing legacy drivers.
\end{description}
Similarly, a driver MAY implement:
\begin{description}
\item[Transitional Driver]
a driver supporting both devices conforming to this
specification, and legacy devices.
\end{description}
\begin{note}
Legacy interfaces are not required; i.e., don't implement them unless you
have a need for backwards compatibility!
\end{note}
Devices or drivers with no legacy compatibility are referred to as
non-transitional devices and drivers, respectively.
\subsection{Transition from earlier specification drafts}\label{sec:Transition from earlier specification drafts}
For devices and drivers already implementing the legacy
interface, some changes will have to be made to support this
specification.
In this case, it might be beneficial for the reader to focus on
sections tagged ``Legacy Interface'' in the section title.
These highlight the changes made since the earlier drafts.
\section{Structure Specifications}\label{sec:Structure Specifications}
Many device and driver in-memory structure layouts are documented using
the C struct syntax. All structures are assumed to be without additional
padding. To stress this, cases where common C compilers are known to insert
extra padding within structures are tagged using the GNU C
__attribute__((packed)) syntax.
For the integer data types used in the structure definitions, the following
conventions are used:
\begin{description}
\item[u8, u16, u32, u64] An unsigned integer of the specified length in bits.
\item[le16, le32, le64] An unsigned integer of the specified length in bits,
in little-endian byte order.
\item[be16, be32, be64] An unsigned integer of the specified length in bits,
in big-endian byte order.
\end{description}
Some of the fields to be defined in this specification don't
start or don't end on a byte boundary. Such fields are called bit-fields.
A set of bit-fields is always a sub-division of an integer typed field.
Bit-fields within integer fields are always listed in order,
from the least significant to the most significant bit. The
bit-fields are considered unsigned integers of the specified
width, with the next-in-significance relationship of the bits
preserved.
For example:
\begin{lstlisting}
struct S {
be16 {
A : 15;
B : 1;
} x;
be16 y;
};
\end{lstlisting}
documents the value A stored in the low 15 bits of \field{x} and
the value B stored in the high bit of \field{x}; the 16-bit
integer \field{x} is in turn stored using the big-endian byte order
at the beginning of the structure S,
and is followed immediately by an unsigned integer \field{y}
stored in big-endian byte order at an offset of 2 bytes (16 bits)
from the beginning of the structure.
Note that this notation somewhat resembles the C bitfield syntax but
should not be naively converted to a bitfield notation for portable
code: it matches the way bitfields are packed by C compilers on
little-endian architectures but not the way bitfields are packed by C
compilers on big-endian architectures.
Assuming that CPU_TO_BE16 converts a 16-bit integer from a native
CPU to the big-endian byte order, the following is the equivalent
portable C code to generate a value to be stored into \field{x}:
\begin{lstlisting}
CPU_TO_BE16(B << 15 | A)
\end{lstlisting}
\section{Constant Specifications}\label{sec:Constant Specifications}
In many cases, numeric values used in the interface between the device
and the driver are documented using the C \#define and /* */
comment syntax. Multiple related values are grouped together with
a common name as a prefix, using _ as a separator.
Using _XXX as a suffix refers to all values in a group.
For example:
\begin{lstlisting}
/* Field Fld value A description */
#define VIRTIO_FLD_A (1 << 0)
/* Field Fld value B description */
#define VIRTIO_FLD_B (1 << 1)
\end{lstlisting}
documents two numeric values for a field \field{Fld}, with
\field{Fld} having value 1 referring to \field{A} and \field{Fld}
having value 2 referring to \field{B}.
Note that $<<$ refers to the shift-left operation.
Further, in this case VIRTIO_FLD_A and VIRTIO_FLD_B
refer to values 1 and 2 of Fld respectively, and VIRTIO_FLD_XXX refers to
either VIRTIO_FLD_A or VIRTIO_FLD_B.
\newpage
| {
"alphanum_fraction": 0.7654989082,
"avg_line_length": 42.8170731707,
"ext": "tex",
"hexsha": "aa9ec1b0dbed5af516d94f953b115018800c62fe",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "e5ea7851b402493b748d8e2ec639fe48e990475c",
"max_forks_repo_licenses": [
"Naumen",
"Condor-1.1",
"MS-PL"
],
"max_forks_repo_name": "rafaelgieschke/virtio-spec",
"max_forks_repo_path": "introduction.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e5ea7851b402493b748d8e2ec639fe48e990475c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Naumen",
"Condor-1.1",
"MS-PL"
],
"max_issues_repo_name": "rafaelgieschke/virtio-spec",
"max_issues_repo_path": "introduction.tex",
"max_line_length": 240,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "e5ea7851b402493b748d8e2ec639fe48e990475c",
"max_stars_repo_licenses": [
"Naumen",
"Condor-1.1",
"MS-PL"
],
"max_stars_repo_name": "rafaelgieschke/virtio-spec",
"max_stars_repo_path": "introduction.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2681,
"size": 10533
} |
\newpage
\paragraph{}\label{comm:refs} % Hack to ensure that TOC link works
\addcontentsline{toc}{section}{\reftitle}
% \bibliographystyle{IEEEtran}
% \bibliographystyle{abbrvnat}
% \bibliographystyle{unsrtnat}
% \bibliographystyle{plainnat}
\bibliographystyle{dinat}
\bibliography{../anot.bib}
\newpage
| {
"alphanum_fraction": 0.7727272727,
"avg_line_length": 20.5333333333,
"ext": "tex",
"hexsha": "44d3c0bd8c935f6275ecfaa452945e8964e9860b",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "dd0ef8054700bf6bbf52439286b0b75a6ff6c2dc",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "liamato/URV-TFG-Template",
"max_forks_repo_path": "sections/bibliography.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "dd0ef8054700bf6bbf52439286b0b75a6ff6c2dc",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "liamato/URV-TFG-Template",
"max_issues_repo_path": "sections/bibliography.tex",
"max_line_length": 66,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "dd0ef8054700bf6bbf52439286b0b75a6ff6c2dc",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "liamato/URV-TFG-Template",
"max_stars_repo_path": "sections/bibliography.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 107,
"size": 308
} |
\documentclass[a4paper,fleqn]{article}
%
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{cite}
\usepackage{calc}
\usepackage{array}
\usepackage{texdraw}
\usepackage{fancyvrb}
\paperwidth 210mm
\paperheight 297mm
\textwidth 160mm
\textheight 240mm
\hoffset 0mm
\voffset 0mm
\evensidemargin 0mm
\oddsidemargin 0mm
\topmargin 0mm
\headheight 0mm
\headsep 0mm
\parindent 0em
\parskip 0.5em
\footnotesep 1em
\setlength{\skip\footins}{6mm}
\setcounter{tocdepth}{1}
%========================================================================
% Shorthands
%========================================================================
\newcommand{\Version}{1.6.1}
\newcommand{\Digits}{\tx{Digits}}
\newcommand{\Digitsa}{\tx{digits()}}
\newcommand{\Digitsb}{\tx{Digits()}}
\newcommand{\Factor}{\tx{Factor}}
\newcommand{\Factorb}{\tx{Factor()}}
\newcommand{\Input}{\tx{Input}}
\newcommand{\Inputa}{\tx{input}}
\newcommand{\Inputb}{\tx{Input()}}
\newcommand{\Mouse}{\textsl{Mouse}}
\newcommand{\Number}{\tx{Number}}
\newcommand{\Numbera}{\tx{number()}}
\newcommand{\Numberb}{\tx{Number()}}
\newcommand{\Object}{\tx{Object}}
\newcommand{\Phrase}{\tx{Phrase}}
\newcommand{\Plus}{\tx{Plus}}
\newcommand{\Plusa}{\tx{plus()}}
\newcommand{\Plusb}{\tx{Plus()}}
\newcommand{\Print}{\tx{Print}}
\newcommand{\Printa}{\tx{print()}}
\newcommand{\Printb}{\tx{Print()}}
\newcommand{\Product}{\tx{Product}}
\newcommand{\Producta}{\tx{product()}}
\newcommand{\Productb}{\tx{Product()}}
\newcommand{\Sign}{\tx{Sign}}
\newcommand{\Signb}{\tx{Sign()}}
\newcommand{\Space}{\tx{Space}}
\newcommand{\Spacea}{\tx{space()}}
\newcommand{\Spaceb}{\tx{Space()}}
\newcommand{\Store}{\tx{Store}}
\newcommand{\Storea}{\tx{store()}}
\newcommand{\Storeb}{\tx{Store()}}
\newcommand{\String}{\tx{String}}
\newcommand{\Sum}{\tx{Sum}}
\newcommand{\Suma}{\tx{sum()}}
\newcommand{\Sumb}{\tx{Sum()}}
\newcommand{\follow}{\operatorname{follow}}
\newcommand{\sL}{\mathcal{L}} % Script L
\newcommand{\Pref}{\operatorname{Pref}}
\renewcommand{\emptyset}{\varnothing} % Empty set
%========================================================================
% Spacing in tables
%========================================================================
\newcommand{\dnsp}{\rule[-1.4ex]{0ex}{1ex}} % Space below text
\newcommand{\upsp}{\rule{0ex}{2.9ex}} % Space above text
\newcommand{\prop}{\rule[-0.4ex]{0ex}{2.5ex}} % Space in boxes
%========================================================================
% TT font
%========================================================================
%\newcommand{\tx}[1]{\small\texttt{#1}\normalsize}
\newcommand{\tx}[1]{\texttt{#1}}
%========================================================================
% Unordered list
%========================================================================
\newcommand{\ul}
{\begin{list}
{--}
{\setlength{\topsep}{0.5ex}
\setlength{\itemsep}{0ex}
\setlength{\parsep}{0ex}
\setlength{\itemindent}{0em}
\setlength{\labelwidth}{1em}
\setlength{\labelsep}{0.5em}
\setlength{\leftmargin}{1.5em}
}
}
\newcommand{\eul}{\end{list}}
%========================================================================
% entry
%========================================================================
\newcommand{\entrylabel}[1]{{#1}\dnsp\hfill}
\newenvironment{entry}
{\begin{list}{}%
{\renewcommand{\makelabel}{\entrylabel}%
\setlength{\labelwidth}{10pt}%
\setlength{\leftmargin}{\labelwidth+\labelsep}%
\setlength{\itemsep}{12pt}%
}%
}%
{\end{list}}
%========================================================================
% Texdraw macros
%========================================================================
\newcommand{\phrase}[6] % #1=class #2=position #3=value #4=top text #5=bottom text #6=ref
{
\linewd 0.1
\textref h:C v:C
\savecurrpos(*#6cx *#6cy) % Save lower left corner
\rmove (12 15) \savecurrpos (*#6tx *#6ty) % Save mid-points of box sides
\rmove (0 -15) \savecurrpos (*#6bx *#6by)
\move(*#6cx *#6cy)
\rmove (0 7.5) \savecurrpos (*#6lx *#6ly)
\rmove (24 0) \savecurrpos (*#6rx *#6ry)
\move(*#6cx *#6cy) % Draw box
\rlvec (24 0) \rlvec (0 15)
\rlvec (-24 0) \rlvec (0 -15)
\move(*#6cx *#6cy) \rmove (0 5) \rlvec(24 0) % .. with line inside
\move(*#6cx *#6cy) \rmove (12 12) \htext{\tx{#1}} % Class name
\move(*#6cx *#6cy) \rmove (12 7.5) \htext{\tx{#2}} % Consumed text
\move(*#6cx *#6cy) \rmove (12 2.2) \htext{\tx{#3}} % Semantic value
\move(*#6cx *#6cy) \rmove (12 18) \htext{\tx{#4}} % Text above
\move(*#6cx *#6cy)
}
\newcommand{\lowphrase}[6] % #1=class #2=position #3=value #4=no top text #5=no bottom text #6=ref
{
\linewd 0.1
\textref h:C v:C
\savecurrpos(*#6cx *#6cy) % Save lower left corner
\rmove (12 15) \savecurrpos (*#6tx *#6ty) % Save mid-points of box sides
\rmove (0 -15) \savecurrpos (*#6bx *#6by)
\move(*#6cx *#6cy)
\rmove (0 7.5) \savecurrpos (*#6lx *#6ly)
\rmove (24 0) \savecurrpos (*#6rx *#6ry)
\move(*#6cx *#6cy) % Draw box
\rlvec (24 0) \rlvec (0 15)
\rlvec (-24 0) \rlvec (0 -15)
\move(*#6cx *#6cy) \rmove (0 5) \rlvec(24 0) % .. with line inside
\move(*#6cx *#6cy) \rmove (12 12) \htext{\tx{#1}} % Class name
\move(*#6cx *#6cy) \rmove (12 7.5) \htext{\tx{#2}} % Consumed text
\move(*#6cx *#6cy) \rmove (12 2.2) \htext{\tx{#3}} % Semantic value
\move(*#6cx *#6cy)
}
%HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
% Title page
%HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
\begin{document}
\pagestyle{empty}
\vspace*{\stretch{1}}
\begin{center}
\rule{\linewidth-20mm}{.5mm}
\bigskip
\Large \textbf{\textit{MOUSE}: FROM PARSING EXPRESSIONS\\TO A PRACTICAL PARSER}
\bigskip
\Large Version \Version
\bigskip
\Large Roman R. Redziejowski
\rule{\linewidth-20mm}{.5mm}
\vspace*{\stretch{1}}
%HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
% Abstract
%HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
\normalsize
\parbox{0.875\linewidth}{
\noindent
Parsing Expression Grammar (PEG) is a new way to specify
recursive-descent parsers with limited backtracking.
The use of backtracking lifts the $LL(1)$ restriction usually imposed
by top-down parsers.
In addition, PEG can define parsers with integrated lexing.
\medskip
\noindent
\Mouse\ is a tool to transcribe PEG into an executable parser written in Java.
Unlike some existing PEG generators (e.g., \textsl{Rats!}), \Mouse\
does not produce a storage-hungry ``packrat parser'',
but a collection of transparent recursive procedures.
\medskip
\noindent
An integral feature of \Mouse\ is the mechanism for specifying
semantics (also in Java).
This makes \Mouse\ a convenient tool if one needs an ad-hoc language processor.
Being written in Java, the processor is operating-system independent.
\medskip
\noindent
This is a user's manual in the form of a tutorial
that introduces the reader to \Mouse\ through
a hands-on experience.}
\end{center}
\vspace*{\stretch{3}}
\begin{center}
May 13, 2014
\end{center}
\newpage
\vspace*{\stretch{1}}
\noindent
Copyright \copyright\ 2009, 2010, 2011, 2012, 2013, 2014 by Roman R. Redziejowski (\tx{www.romanredz.se}).
\noindent
The author gives unlimited permission to copy, translate and/or distribute
this document, with or without modifications,
as long as this notice is preserved,
and information is provided about any changes.
\noindent
The document is available from \tx{http://mousepeg.sourceforge.net}.
\newpage
\tableofcontents
\newpage
\vspace*{\stretch{1}}
\subsection*{Changes from version of January 6, 2012:}
\ul
\item Appendix C: Parser generated with \tx{-M} accepts 0 as argument of \tx{setMemo}.
\item Appendix D: New options: \tx{-t} and \tx{-C}
for tools \tx{TryParser} and \tx{TestParser}.
\eul
\subsection*{Changes from version of November 5, 2011:}
No new or modified functionality, only new version number.
(The new version contains a bug fix.)
\subsection*{Changes from version of October 15, 2011:}
\ul
\item New helper methods \tx{rule()} and \tx{isTerm()} described in Appendix B.
\eul
\subsection*{Changes from version of July 21, 2011:}
\ul
\item New parsing expressions: \tx{\textasciicircum[}$s$\tx{]},
$e_1\tx{*+}\,e_2$, and $e_1\tx{++}\,e_2$
described in Section~\ref{PEG}.
\item The use of \tx{++} illustrated in Section~\ref{errRec} on Example 10.
\item New helper method \tx{where()} described in Appendix B.
\item The use of \tx{where()} illustrated in Section~\ref{calc}
on Example 7.
\item Updated grammar in Appendix A.
\eul
\newpage
\pagestyle{plain}
\setcounter{page}{1}
\input{Parsers} % Recursive-descent parsers
\input{PEG} % Parsing Expression Grammar
\input{GetStarted} % Getting started
\input{FirstSteps} % The first steps
\input{Semantics} % Adding semantics
\input{RHS} % Understanding the "right-hand side"
\input{Realistic} % Getting more realistic
\input{Floating} % Let's go floating
\input{Backtracking} % What about backtracking?
\input{NotPackRat} % A mouse, not a pack rat
\input{FullArith} % Full arithmetic
\input{Tree} % Want a tree?
\input{Calculator} % Calculator with memory
\input{Errors} % Get error handling right
\input{Backtrack2} % Backtracking again
\input{FileInput} % Input from file
\input{Recovery} % Error recovery
\input{Features} % Miscellaneous features
\input{Deploying} % Deploying
\appendix
\input{AppendixA} % The grammar of Mouse PEG
\input{AppendixB} % Helper methods
\input{AppendixC} % Your parser class
\input{AppendixD} % Mouse tools
\newpage
%HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
% Bibliography
%HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
\bibliographystyle{acm}
\bibliography{Bib}
\end{document}
| {
"alphanum_fraction": 0.6009833585,
"avg_line_length": 31.3827893175,
"ext": "tex",
"hexsha": "2f5c8bc49ce1b661d02700d3cd9c5b8702122005",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "021a81f0c02fc079a944569ba382f2c9d7b9b9eb",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "celer/mouse",
"max_forks_repo_path": "Mouse/source/manual/Manual.tex",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "021a81f0c02fc079a944569ba382f2c9d7b9b9eb",
"max_issues_repo_issues_event_max_datetime": "2016-05-31T11:00:12.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-04-07T06:22:47.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "celer/mouse",
"max_issues_repo_path": "Mouse/source/manual/Manual.tex",
"max_line_length": 107,
"max_stars_count": 7,
"max_stars_repo_head_hexsha": "021a81f0c02fc079a944569ba382f2c9d7b9b9eb",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "celer/mouse",
"max_stars_repo_path": "Mouse/source/manual/Manual.tex",
"max_stars_repo_stars_event_max_datetime": "2017-04-08T14:06:28.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-30T11:17:56.000Z",
"num_tokens": 3404,
"size": 10576
} |
\section{Study region}
Douala, located at 04\textdegree03'N 9\textdegree41'E in Central Africa, is the economic capital of Cameroon, with a population of over 3 million inhabitants\cite{populationstatdouala2021} (Fig.~\ref{fig:douala_city_map}). The problem of flooding is widespread in Cameroon\cite{bruckmann2019analyse,tangan2018community}. For instance, flood events have occurred up to 5-10 times a year in the capital and 1-5 times a year in rural areas such as Maga and Lagdo (Northern region)\cite{tangan2018community}. Since 1975, when the Bamendjin dam was constructed, the Ndop plain in northeastern Cameroon has experienced periodic flooding, especially during the rainy season\cite{sighomnou2005cameroon}. Limbe, a seaside town in the southwest region of Cameroon, was also heavily flooded in 2001, leaving over 2000 people homeless and destroying property and infrastructure\cite{ngalim2020stakeholders}. In 2008, the area of Nkolbisson, Cameroon, was hit by two catastrophic floods\cite{bruckmann2019analyse,tangan2018community}.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.8\linewidth]{figure/douala_city_map.png}
\caption{Location of the urban area and the main watersheds of Douala. (Source: Figure 5 from \cite{bruckmann2019analyse})}
\label{fig:douala_city_map}
\end{figure}
\subsection{Flood triggering mechanisms in Cameroon}
Poor waste management has been identified as a major cause of flooding in developing countries like Cameroon\cite{barthelemy2016legislative}. In Mefou, in central Cameroon, and in the Dakar district of Douala (Fig.~\ref{fig:dechet}), drains were observed to be clogged with plastic bottles and other solid waste\cite{gideonrole}. In another study\cite{wung2019enhancing}, flooding in Limbe was found to result from river channel blockage caused by indiscriminate dumping of refuse into the waterway and by sediment deposition from upstream.
\begin{figure}[hbt!]
\centering
\includegraphics[height=4cm,keepaspectratio,width=0.8\linewidth]{figure/dechet.JPG}
\caption{Plastic pollution completely blocking a waterway in the Dakar district of Douala, Cameroon\cite{greenpeaceorg}
}
\label{fig:dechet}
\end{figure}
In the context of climate change, the increasing urbanization of the region often disregards the drainage systems designed to contain runoff and the maximum volume of water that must flow through them during rainy periods. Thus, factors such as inadequate drains, uncontrolled waste disposal, and the nature of precipitation are considered common and important triggers to address when mitigating and preparing for flooding in the region\cite{ngalim2020stakeholders}.
\subsection{Precipitation in Douala}
In Douala, flooding is common during the rainy season from March to October (Fig.~\ref{fig:douala_city_map} and \ref{fig:climateprecipitation}). The Tongo Bassa watershed, located in the heart of the great Cameroonian economic metropolis of Douala, is one of the most affected urban locations of the city. Tongo Bassa occupies an area of approximately 4200 ha, or 42 $km^2$. The basin is crossed by three rivers and is characterized by a gentle slope (0.1 to 0.7\%), which exposes it to daily tide variations. Bonamoussadi, Bepanda and Makepe Missoke, distributed on both sides of the Tongo Bassa river, are the most frequently affected areas.
\begin{figure*}[hbt!]
\centering
\includegraphics[width=0.5\linewidth]{figure/douala_climate.pdf}
\caption{Douala Average temperatures and precipitation (Littoral, Cameroon, 4.05°N 9.7°E)(Source: www.meteoblue.com)\cite{meteoblue}. The mean daily maximum (solid red line) shows the maximum temperature of an average day for every month for Douala. Likewise, mean daily minimum (solid blue line) shows the average minimum temperature. Warm days and cool nights (dashed red and blue lines) show the average of the hottest day and coldest night of each month of the last 30 years}
\label{fig:climateprecipitation}
\end{figure*}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.8\linewidth]{figure/douala.jpg}
\caption{Douala floods are frequent in July and August and cause extensive damage\cite{Afrik21}}
\label{fig:floodimage}
\end{figure}
This highly urbanized basin is subject to rapid runoff towards low-lying areas, with limited infiltration and a high sedimentation rate in drains. Floods in these areas frequently affect residential houses, goods and services due to their exposure and the low coping capacities of inhabitants, causing damage and loss of lives. This was the case on the following dates: August 2\textsuperscript{nd} to 3\textsuperscript{rd}, 2000, August 9\textsuperscript{th}, 2010, and more recently August 21\textsuperscript{st}, 2020, August 11\textsuperscript{th}, 2021, and September 1\textsuperscript{st}, 16\textsuperscript{th} and 18\textsuperscript{th}, 2021.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.8\linewidth]{figure/flood_distribution1984_2018.png}
\caption{Spatial distribution of floods (31 events recorded) in Douala districts over the period 1984-2018. (Source: Figure 5 from \cite{bruckmann2019analyse})}
\label{fig:flood_distribution1984_2018}
\end{figure}
\section{Methods}
\subsection{Synthetic Aperture Radar}
Synthetic Aperture Radar (SAR) is an active microwave remote sensing system in which a sensor sends a beam towards the ground and acquires the signals backscattered after interacting with the Earth's surface. Unlike optical satellite imagery, it is independent of solar electromagnetic energy and can capture high-resolution images during the day and night, in almost all weather conditions, and through clouds \cite{Landuyt2019,WANG1995324}.
The scattering of objects on the SAR image is highly influenced by the terrain (geometry, features, etc.) and also by the acquisition properties (resolution, incident wave, ascending or descending pass, etc.). In addition, the acquisition can be done by emitting and receiving horizontal (H) or vertical (V) polarizations (cross-polarized (VH/HV) or co-polarized (HH/VV) acquisitions), which interact differently with the ground. This provides additional information to characterize the phenomena of the observed region\cite{WANG1995324}. The best accuracy for flood mapping has been reported using the VH polarization configuration\cite{carreno2019flood}.
For a given mission with constant incidence angle and wavelength, the backscattered signal for a targeted area varies depending on the dielectric properties of the target and the physical shape of the scatterers in the resolution cell\cite{farr1993radar}. Water and metals are objects with higher dielectric content than other materials and have a very large response. Therefore, if the geometric shape lies facing the signal line of sight (such as the roofs of houses), the objects will appear bright because a strong signal is returned (or backscattered) to the sensor. On the other hand, if the surface is flat like a plane mirror, the incoming pulses reflect away from the sensor and the area appears as a dark feature (flat water, etc.). Irregular geometries, such as vegetation cover, appear gray because scattering occurs in all directions and only a small fraction of the signal is reflected back to the sensor. Thus, before flooding occurs, dry soil or vegetation has a lower dielectric response. After an area has been flooded, due to the high dielectric constant of water (80), the increased moisture content increases the returned signals. This response presents multiple reflection possibilities (specular reflection, double bounce, etc.) from the surface, which can make it difficult to extract the flood map, especially in vegetated areas (specular reflection or double bounce within the canopy) and urban areas (double bounce on buildings).
The SAR image has two major inherent limitations: radiometric distortions (foreshortening) due to its angular viewing geometry, and diffraction-induced speckle noise. SAR data exhibit salt-and-pepper noise caused by a phenomenon inherent in the active coherent illumination system called speckle. Speckle is due to random constructive and destructive interference in each resolution cell of the image, resulting in degradation of image quality and interpretation. Thus, before any application, radar images must be pre-processed to remove this noise, either by spatial filtering or by multi-looking operations\cite{argenti2013tutorial}.
In general, floods occur under severe weather conditions with heavy rainfall and dense cloud cover. These clouds hinder the effectiveness of optical satellite imagery\cite{sanyal2004}; hence, the use of SAR data for flood monitoring has become very common\cite{rao2006advantage}, and much research has demonstrated its effectiveness in flood event assessment \cite{martinez2007mapping}.
SAR-based flood detection techniques comprise thresholding-based methods \cite{Inglada2007}, image segmentation \cite{martinis2009towards}, statistical active contouring \cite{horritt2001flood}, rule-based classification \cite{pradhan2016new}, interferometric-SAR coherence analysis and data fusion approaches \cite{d2016bayesian}.
To improve accuracy, thresholding-based flood detection techniques have evolved by merging in additional data, such as topographic data.
\subsection{Change detection}
The United Nations Platform for Space-based Information for Disaster Management and Emergency Response (UN-SPIDER) has made available an advanced thresholding-based method that generates a flood extent map and an assessment of the affected areas\cite{un-spider}.
The extent of a flood event is calculated using Sentinel-1 SAR data and a change detection method. The tool also includes an assessment of the number of people likely to be exposed and of the cropland and urban areas affected, which can be cross-referenced with the generated flood extent layer and visualized in minutes.
This approach is suitable for developing countries as it uses the Google Earth Engine (GEE) cloud computing platform (https://code.earthengine.google.com) to process remote sensing data in the cloud. The main advantage is the speed of the computation, which is outsourced to Google's servers, as well as the availability of a variety of regularly updated datasets that are accessible directly in the code editor. Thus, it is possible to access the satellite data archive without having to download the raw data. The GRD imagery in GEE has already undergone thermal noise removal, radiometric calibration and terrain correction; therefore, only a speckle filter needs to be applied during pre-processing.
A change detection approach was chosen, in which images acquired before and after the flood event are compared. This is due to the difficulty of mapping floods in the city of Douala, which is mainly composed of vegetation and dense urban areas. With a basic histogram thresholding method, it is therefore difficult to distinguish flooded vegetation and flooded urban areas because of double-bounce backscatter\cite{manavalan2018review}.
Several supplemental datasets are used to suppress false positives in the flood extent layer. The European Commission's Joint Research Centre Global Surface Water dataset ('Source: EC JRC/Google', https://global-surface-water.appspot.com/), with a spatial resolution of 30 m, is used to mask all areas covered by water for more than 10 months per year\cite{pekel2016high}.
To eliminate pixels with slopes greater than 5\%, the 3 arc-second digital elevation model from the Hydrological data and maps based on SHuttle Elevation Derivatives at multiple Scales (HydroSHEDS) was used.
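A minimal sketch of these change-detection and masking steps with the Earth Engine Python API is given below. It assumes that \texttt{before} and \texttt{after} VH backscatter composites have already been built from Sentinel-1 GRD data (see the Sentinel-1 subsection below); the smoothing radius, the ratio threshold of 1.25, the slope units and the exact asset IDs are illustrative assumptions rather than the exact values of the UN-SPIDER script.
\begin{verbatim}
import ee
ee.Initialize()  # assumes prior Earth Engine authentication

# 'before' and 'after' are ee.Image VH composites (see below).
# Reduce speckle with a simple focal mean (radius in metres).
before_s = before.focal_mean(50, 'circle', 'meters')
after_s = after.focal_mean(50, 'circle', 'meters')

# Change detection: after/before backscatter ratio, thresholded.
ratio = after_s.divide(before_s)
flooded = ratio.gt(1.25).selfMask()  # illustrative threshold

# Remove permanent water (covered > 10 months/year, JRC GSW).
gsw = ee.Image('JRC/GSW1_3/GlobalSurfaceWater')
seasonality = gsw.select('seasonality')
flooded = flooded.updateMask(seasonality.unmask(0).lt(10))

# Remove steep terrain using the HydroSHEDS 3 arc-second DEM.
dem = ee.Image('WWF/HydroSHEDS/03VFDEM')
slope = ee.Terrain.slope(dem)
flooded = flooded.updateMask(slope.lt(5))  # threshold from text
\end{verbatim}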
%\begin{figure*}[hbt!]
% \centering
% \includegraphics[width=0.6\linewidth]{figure/flood_mapping_GEE_workflow_1.png}
% \caption{ The UN-SPIDER Recommended Practice workflow for flood mapping and damage assessment using Sentinel-1 SAR data in Google Earth Engine (Source:UN-SPIDER)\cite{un-spider}}
% \label{fig:flood_mapping_GEE_workflow_1}
%\end{figure*}
\subsection{Sentinel 1}
Sentinel-1 is part of the space missions of the European Union, carried out by the European Space Agency (ESA) under the Copernicus program\cite{Panetti2014,geudtner2014sentinel}. This program aims to establish a global, continuous, autonomous, high-quality and wide-range Earth observation capability.
The constellation of polar-orbiting Sentinel-1 satellites (Sentinel-1A and Sentinel-1B) provides continuous SAR data day and night with a revisit time of 6 days. The data provided by the Copernicus Open Access Hub are mainly Single Look Complex (SLC) products, used for interferometry, and Ground Range Detected (GRD) products\cite{filipponi2019sentinel}. Sentinel-1 Level-1 GRD products consist of focused SAR data that are multi-looked and projected to ground range using an Earth ellipsoid model. These data are accessible via GEE and were used to map a flood event of August 2020 in Douala.
Sentinel-1 data in GEE are provided with different polarizations, modes, passes and resolutions\cite{gees1}:
\begin{enumerate}
\item Transmitter Receiver Polarization: ["VV"], ["HH"], ["VV", "VH"], or ["HH", "HV"]
\item Instrument Mode: "IW" (Interferometric Wide Swath), "EW" (Extra Wide Swath) or "SM" (Strip Map).
\item Orbit Properties pass: "ASCENDING" or "DESCENDING"
\item Spatial resolution meters: 10, 25 or 40
\item GRD resolution: "M" (medium) or "H" (high).
\end{enumerate}
The Sentinel-1 data used were single-polarization acquisitions in VH polarization, with a spatial resolution of 5 m × 20 m and a 250 km swath width.
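As an example, the following sketch (Earth Engine Python API) builds the \texttt{before} and \texttt{after} composites used in the change-detection sketch above. The area of interest, the date windows around the August 2020 event, and the instrument mode, orbit pass and resolution filters shown here are illustrative assumptions.
\begin{verbatim}
import ee
ee.Initialize()

# Rough bounding box around Douala (illustrative).
aoi = ee.Geometry.Rectangle([9.60, 3.95, 9.85, 4.15])

s1 = (ee.ImageCollection('COPERNICUS/S1_GRD')
      .filter(ee.Filter.listContains(
          'transmitterReceiverPolarisation', 'VH'))
      .filter(ee.Filter.eq('instrumentMode', 'IW'))
      .filter(ee.Filter.eq('orbitProperties_pass',
                           'DESCENDING'))
      .filterBounds(aoi)
      .select('VH'))

# Pre- and post-event composites (dates are assumptions).
before = s1.filterDate('2020-07-01', '2020-07-31') \
           .mosaic().clip(aoi)
after = s1.filterDate('2020-08-20', '2020-08-31') \
          .mosaic().clip(aoi)
\end{verbatim}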
\subsection{Twitter}
Publicly available tweets are retrieved using the Python library snscrape (https://github.com/JustAnotherArchivist/snscrape). In this study, we used two different keywords in the query – ``Cameroon flood'' and ``Cameroun inundation'' (French for ``Cameroon flood''). For each tweet, we extract and retain the following information: tweet time, content, number of replies, number of retweets, and number of likes. The retrieved tweets include both original tweets and replies, but not retweets. This work reports and discusses only aggregated statistics of the tweets.
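A minimal retrieval sketch with snscrape is shown below; the cap on the number of tweets and the use of pandas for the aggregated statistics are assumptions.
\begin{verbatim}
import pandas as pd
import snscrape.modules.twitter as sntwitter

rows = []
scraper = sntwitter.TwitterSearchScraper('Cameroon flood')
for i, tweet in enumerate(scraper.get_items()):
    if i >= 1000:          # cap is an assumption
        break
    rows.append({
        'date': tweet.date,
        'content': tweet.content,
        'replies': tweet.replyCount,
        'retweets': tweet.retweetCount,
        'likes': tweet.likeCount,
    })

df = pd.DataFrame(rows)
print(df[['replies', 'retweets', 'likes']].describe())
\end{verbatim}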
To retrieve useful common terms and to conduct sentiment analysis of the tweets, we pre-process the content of the tweets using Natural Language Processing techniques. The Natural Language Toolkit (NLTK) Python library was used to perform the following steps (a sketch in code follows the list):
\begin{enumerate}
\item Remove links, mentions, and hashtags
\item Split sentences into words and punctuation marks (tokenization)
\item Remove stopwords such as articles, prepositions, and punctuation that do not contribute to the meaning of the content
\item Reduce the words to a root form (lemmatization), i.e. convert inflected verb forms to the base verb
\item Remove non-alphabetical characters and keep only words that contain three or more letters
\end{enumerate}
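The sketch below illustrates these steps with NLTK; the exact regular expression, the English stopword list and the required \texttt{nltk.download} resources are assumptions (handling the French tweets would add the corresponding stopword list).
\begin{verbatim}
import re
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize

# Requires: nltk.download('punkt'), 'stopwords', 'wordnet'
stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()

def preprocess(text):
    # 1. Remove links, mentions and hashtags.
    text = re.sub(r'http\S+|@\w+|#\w+', ' ', text.lower())
    # 2. Tokenize into words and punctuation marks.
    tokens = word_tokenize(text)
    # 3. + 5. Drop stopwords, non-alphabetic tokens and
    #    words shorter than three letters.
    tokens = [t for t in tokens
              if t.isalpha() and len(t) >= 3
              and t not in stop_words]
    # 4. Lemmatize each word to its root form.
    return [lemmatizer.lemmatize(t) for t in tokens]
\end{verbatim}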
Using the processed content of the tweets, we can determine the most common terms using tf-idf vectorization. The term frequency of a term $t$ in a document $d$ is defined as:
\begin{equation}
\mathrm{tf}(t,d)=\frac{n}{N}
\end{equation}
where $n$ is the frequency of the term $t$ in document $d$ and $N$ is the frequency of the term $t$ in all documents in the database of the library used. The inverse document frequency is given by:
\begin{equation}
\mathrm{idf}(t,d)=\log\left(\frac{D}{|\{d\in D : t \in d\}|}\right)
\end{equation}
where $D$ is the total number of documents and $|\{d\in D : t \in d\}|$ is the number of documents in which the term $t$ appears. The product of the term frequency and the inverse document frequency is called tf-idf. A more common term has a tf-idf value closer to zero. In our analysis, tf-idf vectorization is performed with the machine learning Python library scikit-learn. Word clouds are then generated based on the tf-idf values.
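A possible implementation of this step with scikit-learn's \texttt{TfidfVectorizer} (version 1.x API) is sketched below; note that scikit-learn applies a smoothed variant of the formulas above, and the way the per-term scores are aggregated for the word cloud is an assumption.
\begin{verbatim}
from sklearn.feature_extraction.text import TfidfVectorizer

# Re-join the pre-processed tokens into one string per tweet.
docs = [' '.join(preprocess(t)) for t in df['content']]

vectorizer = TfidfVectorizer()
tfidf = vectorizer.fit_transform(docs)   # tweets x terms
terms = vectorizer.get_feature_names_out()

# One aggregated tf-idf weight per term, e.g. for a word cloud.
weights = tfidf.sum(axis=0).A1
term_weights = dict(zip(terms, weights))
\end{verbatim}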
To conduct sentiment analysis, we use the Python library TextBlob. This library contains trained models that determine the polarity and subjectivity of a given text. Polarity ranges between -1 and 1, with positive values reflecting emotionally positive messages and negative values reflecting emotionally negative messages; neutral messages have a polarity of 0. Subjectivity ranges between 0 and 1, with 1 being subjective and 0 being objective. Using both polarity and subjectivity allows us to evaluate the sentiments of Twitter users toward flooding issues in Cameroon.
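The corresponding sentiment step with TextBlob could look as follows; applying it to the raw tweet content rather than the pre-processed tokens, and the aggregation shown at the end, are assumptions.
\begin{verbatim}
from textblob import TextBlob

df['polarity'] = df['content'].apply(
    lambda t: TextBlob(t).sentiment.polarity)
df['subjectivity'] = df['content'].apply(
    lambda t: TextBlob(t).sentiment.subjectivity)

# Example aggregated statistics.
share_negative = (df['polarity'] < 0).mean()
share_subjective = (df['subjectivity'] > 0.5).mean()
\end{verbatim}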
| {
"alphanum_fraction": 0.8076222843,
"avg_line_length": 132.8870967742,
"ext": "tex",
"hexsha": "c1c748f2636b85fe7e053c73a54d5cbbc5c32add",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8496597dfcb5cddafe9c581f65a3c8a1b0fd3d8a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mudesire/stea_igcp",
"max_forks_repo_path": "IAC2021/content/methodology.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8496597dfcb5cddafe9c581f65a3c8a1b0fd3d8a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mudesire/stea_igcp",
"max_issues_repo_path": "IAC2021/content/methodology.tex",
"max_line_length": 1461,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "8496597dfcb5cddafe9c581f65a3c8a1b0fd3d8a",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mudesire/stea_igcp",
"max_stars_repo_path": "IAC2021/content/methodology.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3809,
"size": 16478
} |
\documentclass[../PiTest.tex]{subfiles}
\begin{document}
\subsection{System configuration}
The system will work on two machines, each with its own configuration, as explained in the next sections.
\subsubsection{Server}
The server application can generally run on any platform which supports \nodejs 8 and \docker, but the device on which it will be deployed is a Raspberry Pi B+.
\paragraph{Network}
The system requires at least one WiFi adapter to create the common network between the Raspberry and the smartphone, and another one to perform WiFi network analysis.
The network architecture chosen is a station-client one in which the Raspberry is the host, meaning it needs a hostapd service to control the WiFi adapter, a dhcpcd service to dynamically configure the client addressing and a dnsmasq service to route the traffic. The Raspberry will act as a WiFi router, using NAT for its clients.\\
The complete guide to set up the network tools on an Arch Linux system can be found in the server folder of the repository.
\paragraph{Installation and configuration}
To run the server application, \docker, \nodejs and its package manager \npm must be installed on the host.\\
Download the latest version of PiTest from the repository using a browser or \textit{git}.\\
Build a \docker container for the application you want to run on the system; a \textit{Black Arch} dockerfile example is present in the server folder of the repository. It is possible to build as many containers as needed to run all the desired applications.\\
Install all the necessary packages using \texttt{npm install} inside the node server folder; after the operation has completed you can start the server with the command \texttt{npm start}.\\
If you want to run all the tests of the server, you can do so with the command \texttt{npm test} inside the above-mentioned folder.\\
It is good practice to create a service file to automatically start the server at system boot using systemd; the procedure may vary according to the chosen Linux distribution.
\paragraph{Security}
All the \REST calls, except the ping, need a token parameter to authenticate the user. The token is unique in the system and can be configured in the config.json file in the server folder. The port used to connect to the server can also be changed in the same file.
\subsubsection{Client}
\paragraph{Build}
The build process of the client is standard and automated thanks to \gradle. A default \textit{Android Studio} installation will be sufficient to build the application.
\paragraph{Setup}
The client needs an initial setup procedure to connect to the server, during which the user will be asked for the address of the server, the port used to connect to the server and the token for the authentication.
\end{document}
| {
"alphanum_fraction": 0.7958452722,
"avg_line_length": 77.5555555556,
"ext": "tex",
"hexsha": "f6df515353ab2e6027041864118341f83d39445c",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-07-09T22:18:10.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-07-09T22:18:10.000Z",
"max_forks_repo_head_hexsha": "6e44c551856054679350cddf55743713e5063552",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "bertof/PiTest",
"max_forks_repo_path": "Documentation/Sezioni/SystemConfiguration.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6e44c551856054679350cddf55743713e5063552",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "bertof/PiTest",
"max_issues_repo_path": "Documentation/Sezioni/SystemConfiguration.tex",
"max_line_length": 304,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "6e44c551856054679350cddf55743713e5063552",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "bertof/PiTest",
"max_stars_repo_path": "Documentation/Sezioni/SystemConfiguration.tex",
"max_stars_repo_stars_event_max_datetime": "2020-07-09T22:18:09.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-07-09T22:18:09.000Z",
"num_tokens": 598,
"size": 2792
} |
\documentclass{ercgrant}
\author{\textcolor{red}{Dr. John Doe}}
\acro{\textcolor{red}{ACRONYM}}
\title{\textcolor{red}{PROJECT TITLE}}
\institution{\textcolor{red}{Example Institute}}
% ====== BODY OF THE DOCUMENT ======
\begin{document}
\maketitle
\begin{abstract}
\textcolor{red}{
\input{summary.tex}
}
\end{abstract}
%%%%%%%%%%%%% EXTENDED SYNOPSIS %%%%%%%%%%%%%%%%%%%
\section{Extended Synopsis of the Scientific Proposal}
\subsection*{\textcolor{red}{General Background}}
\textcolor{red}{\blindtext[2]}
\subsection*{\textcolor{red}{Some Subsection}}
\textcolor{red}{\blindtext[2]}
\begin{figure}
\begin{center}
\missingfigure[figwidth=\hsize]{Put figure here}
%\includegraphics[width=\hsize]{figurename}
\caption{\textcolor{red}{\blindtext}}
\label{fig:example}
\end{center}
\vspace{-0.5cm}
\end{figure}
\textcolor{red}{\blindtext[2]}
\subsection*{\textcolor{red}{Subsection about the Project}}
\textcolor{red}{\blindtext[2]}
\subsubsection*{\textcolor{red}{Some Subsubsection}}
\textcolor{red}{\blindtext[2]}
\subsection*{\textcolor{red}{Other Subsection}}
\textcolor{red}{\blindtext[2]}
\subsection*{\textcolor{red}{Summary Section}}
\textcolor{red}{\blindtext[2]}
%%%%%%%%%%%%% BIBLIOGRAPHY %%%%%%%%%%%%%%%%%%%
% REMOVE THIS!!!!
\nocite{*} % to cite everything
\renewcommand\bibsection{\subsection*{\refname}}
\begin{small}
\bibliographystyle{aa}
\bibliography{bibliography}
\end{small}
%%%%%%%%%%%%% CURRICULUM VITAE %%%%%%%%%%%%%%%%%%%
\newpage
\section{Curriculum vitae}
\subsection*{Personal Information}
\begin{tabular}{p{5cm}l}
Last name, first name: & \textcolor{red}{Doe, John} \\[0.1cm]
Address: & \textcolor{red}{Institute} \\
& \textcolor{red}{Street} \\
& \textcolor{red}{Place} \\[0.1cm]
Nationality: & \textcolor{red}{fill} \\[0.1cm]
Date of birth: & \textcolor{red}{fill} \\[0.1cm]
Place of birth: & \textcolor{red}{fill} \\[0.1cm]
%###
Website: & \textcolor{red}{fill} \\[0.1cm]
%###
ORCID: & \textcolor{red}{fill}
\end{tabular}
\subsection*{Positions}
\begin{itemize}
\item \textbf{Postdoctoral Fellow}\hfill \textcolor{red}{from} -- \textcolor{red}{til}\\
at \textcolor{red}{fill}\\
\textcolor{red}{place}\\
Advisor: \textit{\textcolor{red}{fill}}\\[-0.75em]
\item \textbf{Postdoctoral Researcher} \hfill \textcolor{red}{from} -- \textcolor{red}{til} \\
at \textcolor{red}{fill}\\
Advisor: \textit{\textcolor{red}{fill}}
\end{itemize}
\subsection*{Education}
\begin{itemize}
\item \textbf{\textcolor{red}{Ph.D. Astronomy, A University}} \hfill \textcolor{red}{date}\\
Thesis: \textit{\textcolor{red}{fill}}\\
Advisor: \textit{\textcolor{red}{fill}}\\[-0.75em]
\end{itemize}
\subsection*{Teaching}
\subsection*{Other Achievements}
\subsection*{Other Services}
\subsection*{Major Collaborations}
\vspace*{1em}
{\small
\begin{tabular}{p{6cm}p{7cm}}
\textbf{\textcolor{red}{Prof. A}} & \textbf{\textcolor{red}{Prof. B}} \\
\textcolor{red}{Institute} & \textcolor{red}{Institute} \\
phone: \textcolor{red}{fill} & phone: \textcolor{red}{fill} \\
email: \textcolor{red}{fill} & email: \textcolor{red}{fill} \\
\\[-0.1cm]
\textbf{\textcolor{red}{Prof. X}} & \textbf{\textcolor{red}{Prof. Y}} \\
\textcolor{red}{Institute} & \textcolor{red}{Institute} \\
phone: \textcolor{red}{fill} & phone: \textcolor{red}{fill} \\
email: \textcolor{red}{fill} & email: \textcolor{red}{fill} \\
\end{tabular}
}
%%%%%%%%%%%%% APPENDIX %%%%%%%%%%%%%%%%%%%
\newpage
\section*{Appendix:\\ All ongoing and submitted grants and funding of the PI (Funding ID)}
\subsection*{On-going Grants}
\begin{footnotesize}
\def\arraystretch{1.5}
\begin{tabular}{|p{3.9cm}|p{1.6cm}|p{1.5cm}|p{2.4cm}|p{2.7cm}|p{2.2cm}|}
\hline
\rowcolor{black!20}\bf Project Title & \bf Funding source & \bf Amount\newline(Euros) & \bf
Period & \bf Role of the PI & \bf Relation to \newline current ERC \newline proposal \\
\hline
\textcolor{red}{fill} & \textcolor{red}{fill} & \textcolor{red}{\EUR{1}} &
\textcolor{red}{fill} & \textcolor{red}{fill} & \textcolor{red}{fill} \\
\hline
\end{tabular}
\end{footnotesize}
\subsection*{Applications}
\begin{footnotesize}
\def\arraystretch{1.5}
\begin{tabular}{|p{3.9cm}|p{1.6cm}|p{1.5cm}|p{2.4cm}|p{2.7cm}|p{2.2cm}|}
\hline
\rowcolor{black!20} \bf Project Title & \bf Funding source & \bf
Amount\newline(Euros) & \bf Period & \bf Role of the PI & \bf Relation to \newline current ERC \newline proposal \\
\hline
\textcolor{red}{fill} & \textcolor{red}{fill} & \textcolor{red}{\EUR{1}} & \textcolor{red}{fill} & \textcolor{red}{fill} & \textcolor{red}{fill}
\\
\hline
\end{tabular}
\end{footnotesize}
%%%%%%%%%%%%% APPENDIX %%%%%%%%%%%%%%%%%%%
\newpage
\section{Early achievements track-record}
\subsection*{Refereed Publications in Major Journals}
Items starting with a solid symbol ($\bullet$, \fstar) are publications without the PhD advisor as
co-author, others ($\circ$, \ostar) include the PhD advisor as co-author. Highlighted papers are
marked with a star symbol (\ostar, \fstar).\\
\begin{itemize}[topsep=0pt,itemsep=0.62ex,partopsep=0ex,parsep=0.5ex]
\input{pub_list.tex}
\end{itemize}
\subsection*{Talks}
\subsubsection*{\textit{Selected Colloquia}}
\begin{longtable}{p{7.5cm}p{6cm}l}
\textcolor{red}{Seminar} & \textcolor{red}{A University} & \textcolor{red}{date} \\
\textcolor{red}{Seminar} & \textcolor{red}{B University} & \textcolor{red}{date} \\
\end{longtable}
\subsubsection*{\textit{Selected Conference Talks}}
\begin{longtable}{p{7.5cm}p{6cm}l}
\textcolor{red}{Invited review on Blabla} & \textcolor{red}{City} & \textcolor{red}{date} \\
\end{longtable}
\end{document}
| {
"alphanum_fraction": 0.6148637756,
"avg_line_length": 32.1398963731,
"ext": "tex",
"hexsha": "24112028628bf0ecf556ca914f45605039c87505",
"lang": "TeX",
"max_forks_count": 11,
"max_forks_repo_forks_event_max_datetime": "2021-11-22T14:35:44.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-09-04T09:59:37.000Z",
"max_forks_repo_head_hexsha": "32805de8dd23ec7261de4b34477a6fad2a9e0118",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "birnstiel/erc_template",
"max_forks_repo_path": "erc_B1.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "32805de8dd23ec7261de4b34477a6fad2a9e0118",
"max_issues_repo_issues_event_max_datetime": "2015-10-19T09:18:44.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-10-19T09:18:44.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "birnstiel/erc_template",
"max_issues_repo_path": "erc_B1.tex",
"max_line_length": 198,
"max_stars_count": 9,
"max_stars_repo_head_hexsha": "32805de8dd23ec7261de4b34477a6fad2a9e0118",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "birnstiel/erc_template",
"max_stars_repo_path": "erc_B1.tex",
"max_stars_repo_stars_event_max_datetime": "2020-10-09T17:58:17.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-09-14T09:58:50.000Z",
"num_tokens": 2030,
"size": 6203
} |
%
% CMPT 320: Social Implications of a Computerized Society - A Course Overview
% Section: Privacy
%
% Author: Jeffrey Leung
%
\section{Privacy}
\label{sec:privacy}
\begin{easylist}
& \textbf{Privacy:}
&& Core aspects:
&&& Control of your information and other entities publishing
&&& Freedom from surveillance
&&& Freedom from intrusion
& Threats to privacy:
&& External access (through theft, leakage, personal mistakes, etc.)
&& Inappropriate/nonconsensual surveillance
&& Intentional secondary usage by insiders with access
&&& By private companies for uses such as marketing
&&& By governments for uses such as law enforcement
& \textbf{Fair information principles:}
&& Information collection:
&&& Inform people when collecting, about the content, and how it will be used
&&& Data is accurate and secure
&&& Only the data required
&&& Data kept only while needed
&&& Offer opt-outs
&&& Develop policies for responding to law enforcement requests (e.g. see \href{https://www.twilio.com/blog/2018/02/developers-guide-to-nsl.html}{Twilio})
& Organizational privacy:
&& \textbf{Audit trail:} Record of who accessed what information when
&& \textbf{Chief Privacy Officer:} Company position which manages privacy policy and its application
&& \textbf{Privacy audit:} Scan for information leakage, privacy policy health and compliance, and other privacy-related issues
\end{easylist}
\subsection{Legal Aspects}
\label{subsec:privacy:legal}
\begin{easylist}
& Legal systems:
&& \textbf{Case law:} Legal system used by England in which laws are general principles and judges rule on details for applying a law
&& \textbf{Positive law:} Legal system used by many European countries in which rules cover as many specific situations as possible
& Legal documents:
&& US Constitution Amendment 4 protects against unreasonable search and seizure except upon a warrant with probable cause
&& Canadian Charter Clause 8 protects against unreasonable search or seizure
& \textbf{Secondary use:} Application of personal information to a different purpose than its collection purpose
& \textbf{Free market viewpoint on privacy:} Users should have access to informed consent and freedom of choice to use of their information in exchange for benefits/services
&& E.g. Companies inform users about secondary uses and obtain informed consent, trading benefits for personal information
& \textbf{Consumer protection viewpoint on privacy:} Users have the right to privacy and are entitled to control over their information
& Information is generally protected when there is a reasonable expectation of privacy
& Canadian privacy laws vs. US:
&& More information control
&& More enforcement and regulation
&& Less governmental access
&& Provincial/federal Privacy Commissioners supervise activities
& European Union privacy laws vs. US:
&& Stronger privacy protections
&& Sending European personal data to US requires US recipient to follow Privacy Shield rules
&& \textbf{Right to be Forgotten:} European Court of Justice decreed that a person can require companies to remove links to certain kinds of personal information
& Related acts and legislations (in alphabetical order):
&& \textbf{Bill C-51 (2015, Canada):} Increased information access by law enforcement
&& \textbf{Communications Act (1934):} Interception of messages not allowed
&& \textbf{Communications Assistance for Law Enforcement Act by FCC (1994):} Telecommunications must allow governmental interception
&& \textbf{Electronic Communications Privacy Act (1986):} Extended OCCSSA wiretapping laws to electronic communications; restricts government access to email
&& \textbf{Foreign International Surveillance Act (1978):} Rules on communications between Americans and foreigners
&& \textbf{Omnibus Crime Control \& Safe Streets Act (1968):} Electronic surveillance by law enforcement with court order allowed
&& \textbf{Personal Information Protection and Electronic Documents Act (Canada)}
&& \textbf{Privacy Act (Canada):}
&&& Restricts governmentally-collected data to only that which is relevant and necessary
&&& Requires government notice of record system
&&& Allows access of your own records and correction of inaccurate information
&&& Requires procedures to secure data
&&& Prohibits unconsented information disclosure
&& Unnamed (2009): International call surveillance without court order allowed
\end{easylist}
\clearpage | {
"alphanum_fraction": 0.7802197802,
"avg_line_length": 51.2528735632,
"ext": "tex",
"hexsha": "ca8dac214962afb3c28140ab3da70a05b96b8838",
"lang": "TeX",
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2021-12-27T21:44:56.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-11-18T09:17:46.000Z",
"max_forks_repo_head_hexsha": "c4640bbcb65c94b8756ccc3e4c1bbc7d5c3f8e92",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "AmirNaghibi/notes",
"max_forks_repo_path": "cmpt-320-social-implications-of-a-computerized-society/tex/privacy.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c4640bbcb65c94b8756ccc3e4c1bbc7d5c3f8e92",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "AmirNaghibi/notes",
"max_issues_repo_path": "cmpt-320-social-implications-of-a-computerized-society/tex/privacy.tex",
"max_line_length": 173,
"max_stars_count": 25,
"max_stars_repo_head_hexsha": "c4640bbcb65c94b8756ccc3e4c1bbc7d5c3f8e92",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "AmirNaghibi/notes",
"max_stars_repo_path": "cmpt-320-social-implications-of-a-computerized-society/tex/privacy.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-09T02:37:39.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-08-11T08:45:10.000Z",
"num_tokens": 996,
"size": 4459
} |
Rust's main draw is its powerful static guarantees about behavior. But safety checks are conservative by nature: some
programs are actually safe, yet the compiler is not able to verify that this is true. To write these kinds of programs, we need to
tell the compiler to relax its restrictions a bit. For this, Rust has a keyword, \code{unsafe}. Code using \code{unsafe} has fewer
restrictions than normal code does.
\blank
Let's go over the syntax, and then we'll talk semantics. \code{unsafe} is used in four contexts. The first one is to mark a function
as unsafe:
\begin{rustc}
unsafe fn danger_will_robinson() {
// scary stuff
}
\end{rustc}
All functions called through FFI must be marked as \code{unsafe}, for example (see \nameref{sec:effective_FFI}). The second use
of \code{unsafe} is an unsafe block:
\begin{rustc}
unsafe {
// scary stuff
}
\end{rustc}
The third is for unsafe traits:
\begin{rustc}
unsafe trait Scary { }
\end{rustc}
And the fourth is for \code{impl}ementing one of those traits:
\begin{rustc}
unsafe impl Scary for i32 {}
\end{rustc}
It's important to be able to explicitly delineate code that may have bugs that cause big problems. If a Rust program segfaults, you
can be sure the cause is related to something marked \code{unsafe}.
\subsection*{What does 'safe' mean?}
Safe, in the context of Rust, means 'doesn't do anything unsafe'. It's also important to know that there are certain behaviors that
are probably not desirable in your code, but are expressly \emph{not} unsafe:
\begin{itemize}
\item{Deadlocks}
\item{Leaks of memory or other resources}
\item{Exiting without calling destructors}
\item{Integer overflow}
\end{itemize}
Rust cannot prevent all kinds of software problems. Buggy code can and will be written in Rust. These things aren't great, but they
don't qualify as \code{unsafe} specifically.
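For instance, the following compiles and runs without a single \code{unsafe} block, even though it leaks memory and wraps an integer around (a small illustrative sketch of mine, not an example from the original documentation):
\begin{rustc}
use std::mem;

fn main() {
    // Leaking is safe: `mem::forget` takes ownership and never runs the
    // destructor, so the allocation is never freed.
    let data = Box::new([0u8; 1024]);
    mem::forget(data);

    // Integer overflow is not undefined behavior either; `wrapping_add`
    // makes the wrap-around explicit.
    let x: u8 = 255u8.wrapping_add(1);
    println!("{}", x); // prints 0
}
\end{rustc}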
\blank
In addition, the following are all undefined behaviors in Rust, and must be avoided, even when writing \code{unsafe} code:
\begin{itemize}
\item{Data races}
\item{Dereferencing a null/dangling raw pointer}
\item{Reads of \href{http://llvm.org/docs/LangRef.html\#undefined-values}{undef} (uninitialized) memory}
\item{Breaking the \href{http://llvm.org/docs/LangRef.html\#pointer-aliasing-rules}{pointer aliasing rules} with raw pointers.}
\item{\code{\&mut T} and \code{\&T} follow LLVM's scoped \href{http://llvm.org/docs/LangRef.html\#noalias}{noalias} model, except if
the \code{\&T} contains an \code{UnsafeCell<U>}. Unsafe code must not violate these aliasing guarantees.}
\item{Mutating an immutable value/reference without \code{UnsafeCell<U>}}
\item{Invoking undefined behavior via compiler intrinsics:}
\begin{itemize}
\item{Indexing outside of the bounds of an object with \code{std::ptr::offset} (\code{offset} intrinsic), with the exception of one
byte past the end which is permitted.}
\item{Using \code{std::ptr::copy\_nonoverlapping\_memory} (\code{memcpy32/memcpy64} intrinsics) on overlapping buffers}
\end{itemize}
\item{Invalid values in primitive types, even in private fields/locals:}
\begin{itemize}
\item{Null/dangling references or boxes}
\item{A value other than \code{false} (0) or \code{true} (1) in a \code{bool}}
\item{A discriminant in an \enum\ not included in its type definition}
\item{A value in a \varchar\ which is a surrogate or above \code{char::MAX}}
\item{Non-UTF-8 byte sequences in a \code{str}}
\end{itemize}
\item{Unwinding into Rust from foreign code or unwinding from Rust into foreign code.}
\end{itemize}
\subsection*{Unsafe Superpowers}
In both unsafe functions and unsafe blocks, Rust will let you do three things that you normally cannot do. Just three. Here they are:
\begin{itemize}
\item{Access or update a static mutable variable (see \nameref{paragraph:static}).}
\item{Dereference a raw pointer.}
\item{Call unsafe functions. This is the most powerful ability.}
\end{itemize}
That's it. It's important to note that \code{unsafe} does not, for example, 'turn off the borrow checker'. Adding \code{unsafe} to some
random Rust code doesn't change its semantics: the compiler won't suddenly start accepting code it would otherwise reject. But it will let you write things that do break
some of the rules.
\blank
You will also encounter the \code{unsafe} keyword when writing bindings to foreign (non-Rust) interfaces. You're encouraged to write
a safe, native Rust interface around the methods provided by the library.
\blank
Let's go over the basic three abilities listed, in order.
\myparagraph{Access or update a \code{static mut}}
Rust has a feature called '\code{static mut}' which allows for mutable global state. Using it can cause a data race, and as such it
is inherently not safe. For more details, see the static section of the book (see \nameref{paragraph:static}).
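As a minimal sketch (not an example from the original documentation), any read or write of such a variable has to happen inside an \code{unsafe} block:
\begin{rustc}
static mut COUNTER: u32 = 0;

fn main() {
    unsafe {
        COUNTER += 1;
        let current = COUNTER; // copy the value out while still in the block
        println!("COUNTER is {}", current);
    }
}
\end{rustc}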
\myparagraph{Dereference a raw pointer}
Raw pointers let you do arbitrary pointer arithmetic, and can cause a number of different memory safety and security issues. In
some senses, the ability to dereference an arbitrary pointer is one of the most dangerous things you can do. For more on raw pointers,
see their section of the book (\nameref{sec:syntax_rawPointers}).
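As a brief sketch (again not from the original documentation), note that creating a raw pointer is safe; only dereferencing it requires \code{unsafe}:
\begin{rustc}
fn main() {
    let x = 5;
    let raw = &x as *const i32;

    // Creating `raw` needed no `unsafe`; reading through it does.
    let value = unsafe { *raw };
    println!("value is {}", value);
}
\end{rustc}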
\myparagraph{Call unsafe functions}
This last ability works with both aspects of \code{unsafe}: you can only call functions marked \code{unsafe} from inside an unsafe block.
\blank
This ability is powerful and varied. Rust exposes some compiler intrinsics as unsafe functions (see \nameref{sec:nightly_intrinsics}),
and some unsafe functions bypass safety checks, trading safety for speed.
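A typical example of such a speed-for-safety trade (a sketch of mine, not from the original documentation) is \code{String::from\_utf8\_unchecked}, which skips UTF-8 validation; the caller must guarantee that the bytes really are valid UTF-8:
\begin{rustc}
fn main() {
    let bytes = vec![104, 101, 108, 108, 111]; // "hello" in ASCII
    // `from_utf8_unchecked` is an `unsafe fn`, so the call must be wrapped
    // in an `unsafe` block; the contract holds because the bytes above are
    // valid UTF-8.
    let s = unsafe { String::from_utf8_unchecked(bytes) };
    println!("{}", s);
}
\end{rustc}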
\blank
I'll say it again: just because you \emph{can} do arbitrary things in unsafe blocks and functions doesn't mean you should. The compiler
will act as though you're upholding its invariants, so be careful!
| {
"alphanum_fraction": 0.756844627,
"avg_line_length": 44.2727272727,
"ext": "tex",
"hexsha": "2fdf363ee69a49c239d1433bb546f81ad82e8104",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2d86097002e09eb6338a8b2c143da86dec81092e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Darth-Revan/rust-lang_Doc-LaTeX",
"max_forks_repo_path": "src/syntax/unsafe.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2d86097002e09eb6338a8b2c143da86dec81092e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Darth-Revan/rust-lang_Doc-LaTeX",
"max_issues_repo_path": "src/syntax/unsafe.tex",
"max_line_length": 137,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "2d86097002e09eb6338a8b2c143da86dec81092e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Darth-Revan/rust-lang_Doc-LaTeX",
"max_stars_repo_path": "src/syntax/unsafe.tex",
"max_stars_repo_stars_event_max_datetime": "2017-07-21T14:09:39.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-07-21T14:09:39.000Z",
"num_tokens": 1497,
"size": 5844
} |
\newpage
\chapter*{Abstract}
\addcontentsline{toc}{chapter}{Abstract}
Modern Web applications are often deployed and executed on a Cloud infrastructure, which provides a convenient on-demand approach to renting resources and easy-to-use horizontal scaling capabilities. The workload of Web applications changes continuously over time, and unexpected peaks of requests can leave the system unable to respond. For this reason, autonomic adaptation is an emerging solution that automatically adapts the resources allocated to the application according to the incoming traffic, CPU utilization, and other metrics. Our ongoing autonomic initiative is based on the MAPE architecture (Monitor-Analyze-Plan-Execute). This thesis focuses on the Execute component.
While state-of-the-art solutions focus on adjusting the number of Virtual Machines allocated to the application, containerization, a kind of virtualization that takes place at the operating system level, is emerging and becoming popular. Containers are Linux processes that run sandboxed on a shared host operating system. Because a container does not bundle an entire operating system, this technology is more lightweight and faster to boot compared to Virtual Machines.
The contribution of this thesis is the implementation of the Execute component, which exploits both Virtual Machines and containers, enabling faster, finer-grained, and multi-layer adaptation. We consider not only adaptation at the infrastructure layer, but also adaptation of the middleware software that enables the execution of application-specific code, such as application servers, DBMSs, and so on. We have implemented two approaches for the Execute component: a monolithic one and a hierarchical one. The former is a centralized architecture in which only the monitoring sensors are distributed among the nodes; the latter is a completely distributed architecture in which all the MAPE components are replicated at each level of the hierarchy (container, VM, cluster of VMs, etc.).
\ifx
Modern Web applications are now often built on the Cloud infrastructure which provides the flexibility and the scalability for applications. Cloud solutions can be considered as environmentally friendly, as they can reduce the usage of the infrastructure resources, hence the electricity consumption and costs. The workload on the modern Web applications is continuously changing and we can exploit it to adapt applications by means of the resource allocation (Virtual Machines).
The implementation of our adaptation solution is based on the MAPE approach (Monitor-Analyze-Plan-Execute). While the Monitor-Analyze-Plan parts are out of the focus of this thesis, the work focuses on the Execute part.
The contributions of this thesis are to use the coarse-grained and fine-grained adaptations (Virtual Machines and containers) and also the multilayer adaptation. We consider not only the infrastructure, but also the middle-ware adaptations: application servers, DBMS and so on.
We have implemented two approaches for the "Execute" component: the monolithic and the hierarchical one. The scope of this thesis is the implementation details of both these approaches.
----
The world is moving to economy of the nature resources and environment-friendly solutions that can be called sustainable solutions. Approach of sustainability can be also applied to large computer infrastructures to adapt allocated resources depending on the load, on the time of the day or on other input.
Modern Web applications exploit Cloud infrastructures to
scale their resources and cope with sudden changes in the
workload. While the state of practice focuses on dynamically adding and removing virtual machines, we also implement and look on more fine-grained solution: operation system-level containerization.
In this thesis we present an autoscaling technique that allows containerized applications to scale their resources both at the VM level and at the container level. Furthermore, applications can combine this infrastructural adaptation with platform-level adaptation. The autoscaling is made possible by our planner, which consists of a discrete-time feedback
controller.
The work investigates coarse-grained virtualization techniques like virtual machine comparing to light-weight fine-grained techniques like operating system-level virtualization (containers virtualization). The scope of the thesis is implementation of both techniques and comparing the advantages and disadvantages of both of them.
The work has been validated using two application bench-
marks deployed to Amazon EC2. Our experiments show
that our planner outperforms Amazon’s AutoScaling by 78%
on average without containers; and that the introduction of
containers allows us to improve by yet another 46% on av-
erage.
----
This paper presents the research on Autonomic Systems with different types of virtualization. It were investigated coarse-grained virtualization techniques like virtual machines or cloud instances, and light-weight fine-grained techniques like operating system-level virtualization (containers). The thesis scope is implementation case of Autonomic System using different virtualization elements and considering the advantages and disadvantages of using containers for building Autonomic Systems.
\fi | {
"alphanum_fraction": 0.8310823312,
"avg_line_length": 125.6976744186,
"ext": "tex",
"hexsha": "0813951d3b153c1666a8f07221035e82446bdf67",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f7da3a5e8a01f6d9a83edabc6778683a5c225a5f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "n43jl/thesis",
"max_forks_repo_path": "abstract.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f7da3a5e8a01f6d9a83edabc6778683a5c225a5f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "n43jl/thesis",
"max_issues_repo_path": "abstract.tex",
"max_line_length": 812,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f7da3a5e8a01f6d9a83edabc6778683a5c225a5f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "n43jl/thesis",
"max_stars_repo_path": "abstract.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 956,
"size": 5405
} |
\documentclass[11pt]{article}
%%% PAGE DIMENSIONS
\usepackage{geometry}
\geometry{a4paper}
%%% PACKAGES
\usepackage[english]{babel}
\usepackage[utf8]{inputenc}
\usepackage{graphicx}
\usepackage[parfill]{parskip}
\usepackage{booktabs}
\usepackage{array}
\usepackage{paralist}
\usepackage{verbatim}
\usepackage{subfig}
\usepackage{cite}
\usepackage{amsmath}
\usepackage[colorinlistoftodos]{todonotes}
%%% HEADERS & FOOTERS
\usepackage{fancyhdr}
\pagestyle{fancy}
\renewcommand{\headrulewidth}{0pt}
\lhead{}\chead{}\rhead{}
\lfoot{}\cfoot{\thepage}\rfoot{}
%%% SECTION TITLE APPEARANCE
\usepackage{sectsty}
\allsectionsfont{\sffamily\mdseries\upshape}
%%% TABLE OF CONTENTS APPEARANCE
\usepackage[nottoc,notlof,notlot]{tocbibind}
\usepackage[titles,subfigure]{tocloft}
\renewcommand{\cftsecfont}{\rmfamily\mdseries\upshape}
\renewcommand{\cftsecpagefont}{\rmfamily\mdseries\upshape}
%%% DOCUMENT
\title{Mapping the Brain: An Introduction to Connectomics\\Progress Report: Streamlining Membrane Segmentation Pipeline}
\author{Thomas Keady, Albert Lee, Augusto Ramirez}
\date{\today}
\begin{document}
\maketitle
\section{Summary}
In the past week, the neuroXplorer project to streamline the MACHO pipeline has been refocused on streamlining the membrane segmentation pipeline instead. Currently, the project is running a few days behind the deadlines presented in the proposal, mostly due to initial technical difficulties with running the original pipeline and collecting data. We resolved this after meeting with Jordan, who explained ndio, which now allows us to retrieve membrane ROI (region of interest) annotations and image data from the Open Connectome Project (OCP) server. Will guided us through running the pipeline using the CAJAL application programming interface in Matlab. We also recently found out that we should be collecting data from the cv\_kasthuri11\_membrane\_2014 token instead of kasthuri2015\_ramon\_v1. This change has allowed us to make progress on the Python implementation for running watershed, but it has also presented some issues (ndio is missing the RAMONVolume functions or a simple OCPQuery module). We are working on watershed segmentation using scikit-image and managed to run it through Python on the collected data successfully. The progress with watershed has allowed us to move on to correcting the issues (image processing output) present in the new batch of data (cv\_kasthuri11\_membrane\_2014).
\section{Updated Goals}
As mentioned earlier, the project has evolved from streamlining MACHO, where the expected impact was only vaguely defined, to streamlining the image processing steps of the membrane segmentation pipeline, where results are quantifiable. Working with watershed, we now have a clearer project with a more defined goal: to tune parameters so as to lower splitting and merging error rates. The results of our streamlining experiment using watershed and Python will lead to a documented optimal way to run the membrane segmentation pipeline. Our results can be passed to gala next, making the project potentially relevant to streamlining the i2g pipeline.
\subsection{Updated Timeline}
\begin{itemize}
\item 1/16/16: Get guidance from Will/Greg and/or Jordan on running OCPQuery (imageDense and probDense) from CAJAL on ndio. \newline
\item 1/17/16: Compare and contrast at least two sets of images through our pipeline and the current pipeline and most importantly, understand the results. \newline
\item 1/18/16: Have Matlab segments of the pipeline converted to working and integrated Python. \newline
\item 1/20/16: Begin implementing Kaynig gap completion code in Python as another step in the pre-pipeline processing. \newline
\item 1/22/16: Be able to run full pipeline plus our addition(s). \newline
\item End of class: Further reduce inaccuracies. \newline
\end{itemize}
\end{document}
| {
"alphanum_fraction": 0.8055268595,
"avg_line_length": 56.9411764706,
"ext": "tex",
"hexsha": "50a37c83819f29a68d4311570dab6990cc12c83e",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d8d5897e9246df8707c852c1b78d617ff44ee062",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "Connectomics-Classes/Neuro-x-polorer",
"max_forks_repo_path": "Progress Report/neuroXplorer-progress.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d8d5897e9246df8707c852c1b78d617ff44ee062",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "Connectomics-Classes/Neuro-x-polorer",
"max_issues_repo_path": "Progress Report/neuroXplorer-progress.tex",
"max_line_length": 1346,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "d8d5897e9246df8707c852c1b78d617ff44ee062",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "Connectomics-Classes/Neuro-x-polorer",
"max_stars_repo_path": "Progress Report/neuroXplorer-progress.tex",
"max_stars_repo_stars_event_max_datetime": "2022-01-19T09:49:15.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-01-16T00:56:58.000Z",
"num_tokens": 945,
"size": 3872
} |
One of the most important mechanisms in XtreemFS is the possibility of having
several replicas of a file distributed over the Grid. This feature allows
data-intensive applications to achieve better performance, since there is no
single access point for the data and mechanisms for parallel access can be
exploited. Besides, replication also provides reliability and availability to
the filesystem, which is of vital importance in a distributed environment.
However, Grid resources such as the network (across which data is transferred)
and storage (where data is stored) are finite, shared, and not free.
Furthermore, the synchronization of the replicas of any given file involves
additional overhead, so mechanisms that balance the benefits against the
extra costs are needed.
To address all of these goals, we are working on the implementation of the
Replica Management Service. This service is concerned with selecting the best
replicas for the applications, creating and deleting replicas automatically
taking into account how and from where they are accessed, and determining the
maximum number of replicas of any given file.
\subsection{Choosing the best replica}
\label{RMS_Choosing_Replicas}
When a given client (or an OSD) has to access a file, the question is: which
replica should it access? It should be able to detect which replica will provide
better performance. The idea to solve this problem is to build a virtual 2D
space and locate all replicas, OSDs, and clients in it. The distance
between two different objects (i.e., replica, OSD, or client) is an indicator of
the distance (performance-wise) between these two objects. Once a client wants to
access a file, it just needs to compute the Euclidean distance between itself
and all replicas and choose the closest one.
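To illustrate just this selection step (a sketch under the assumption that client and replica coordinates are already known; it is not XtreemFS code), the choice reduces to finding the minimum Euclidean distance:
\begin{verbatim}
// Illustrative sketch (Rust), not XtreemFS code: pick the replica whose
// coordinates are closest to the client's own coordinates.
fn closest_replica(client: (f64, f64),
                   replicas: &[(f64, f64)]) -> Option<usize> {
    replicas
        .iter()
        .enumerate()
        .map(|(i, &(x, y))| {
            let d = ((client.0 - x).powi(2) + (client.1 - y).powi(2)).sqrt();
            (i, d)
        })
        .min_by(|a, b| a.1.partial_cmp(&b.1).unwrap())
        .map(|(i, _)| i)
}

fn main() {
    let client = (0.0, 0.0);
    let replicas = [(3.0, 4.0), (1.0, 1.0), (10.0, 0.0)];
    println!("{:?}", closest_replica(client, &replicas)); // Some(1)
}
\end{verbatim}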
\subsubsection{Vivaldi Algorithm}
Vivaldi is a light-weight algorithm developed at MIT \cite{dabek2004vdn}
that assigns a position in a coordinate space to every node in a network,
so that the distance between the coordinates of two nodes predicts the real communication
latency between them.
In order to generate a valid coordinate system, it is necessary to determine
which space will be used and which formula will be used to calculate the
distance between two given points. In our case, it has been shown that
using a 2-D space, where the Euclidean distance between two coordinates
accurately predicts the latency between their corresponding nodes, generates
valid results with a very small error probability.
For the algorithm to work correctly, it is also necessary that the nodes of the
system keep contacting each other randomly and indefinitely to re-adjust their
positions, so that any change in the network is reflected. In each
re-adjustment, a node contacts a different neighbor, gets its coordinates, and
modifies its own coordinates, so that eventually the Euclidean distance is as similar
as possible to the measured round trip time.
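For illustration, the adjustment performed in each iteration can be summarized by the update rule proposed in \cite{dabek2004vdn} (shown here as a sketch; the XtreemFS implementation may differ in its details):
\[
x_i \leftarrow x_i + \delta \cdot \left( rtt_{ij} - \| x_i - x_j \| \right) \cdot u(x_i - x_j)
\]
where $x_i$ and $x_j$ are the coordinates of the local and the contacted node, $rtt_{ij}$ is the measured round trip time, $u(x_i - x_j)$ is the unit vector pointing from $x_j$ towards $x_i$, and the adaptive time step $\delta$ is scaled by the relative local errors of the two nodes, so that uncertain nodes move further than well-placed ones.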
On the other hand, once a group of nodes has established a valid coordinate
system, it is necessary to use some mechanism that reduces the impact of
introducing new nodes, so that they do not perturb the already stabilized system.
That is why Vivaldi keeps in every node, besides the coordinates, a local error
that indicates how sure a node is about its position. This way, a node with
a steadier position will have a smaller local error and will have more influence on the
rest of the nodes when they contact it to readjust their positions (figure
\ref{rms1}).
\begin{figure}[t]
\begin{center}
\includegraphics[width=0.60\textwidth]{images/rms1.png}
\caption{Nodes keep recalculating their position}
\label{rms1}
\end{center}
\end{figure}
Once the system is adjusted, any node of the network can determine which nodes
are the closest ones with a really simple calculation, in a very short period of
time and without generating extra traffic.
Some already developed implementations of Vivaldi can be found in p2psim and in
Chord. You might also be interested in Ledlie et al.'s work \cite{ledlie2007ncw}.
\subsubsection{Vivaldi in XtreemFS}
As we have different kinds of nodes in our architecture, not all of them
integrate Vivaldi in the same way. While the clients usually run for
shorter periods of time, the servers stay up and running, so the
idea is to let the OSDs (at this moment the only
servers that implement Vivaldi) establish a permanent coordinate system through which a
client can move to find its position.
\subsubsection{Vivaldi in the OSDs}
An OSD has an independent stage responsible for managing Vivaldi on its side and
for providing to the rest of the components a pair of valid coordinates that define
the position of the node in the current coordinate system.
The stage keeps running indefinitely and periodically contacts a different OSD
to ask it for its coordinates and its local error. With that data and the OSD's own
coordinates, it is possible to compute the Euclidean distance and to
compare it with the real RTT measured against the contacted node.
The frequency with which an OSD readjusts its position is defined by the parameters
MIN\_\-TIMEOUT\_\-RECALCULATE and MAX\_\-TIMEOUT\_\-RECALCULATE. Just after performing a
readjustment, the stage typically calculates a random number within the
interval of time defined by those two parameters and sleeps for that number
of seconds until the next iteration. This way we try to avoid generating traffic
peaks where all the nodes send a request at the same time, and to spread the
network usage over time.
Larger periods will reduce the overhead in the network but will make the nodes
adjust more slowly to possible changes in the environment, while smaller
ones will require more traffic but will produce a more reactive system.
In each iteration, the stage chooses a node to contact from a list
of available OSDs, which is filled with the information contained in the
Directory Service. This list must be kept up to date so that the stage can always
notice when a node goes offline.
\subsubsection{Vivaldi in clients}
In our system, the clients usually execute during a much shorter period of time,
so they have to be able to determine their position faster. This is possible
because they do not influence the rest of the nodes and just take the
information they need from the already placed OSDs to locate themselves.
In Vivaldi, each node is responsible for its own coordinates and typically has to
recalculate them at least a small number of times before they represent its real
position in the coordinate system. Even if the set of OSDs is ``adjusted'', a
client will need to recalculate its position (against one single node each time)
several times before having an accurate approximation of its real location.
Vivaldi requires that the nodes of the network generate traffic and communicate
among themselves.
As in the case of the OSDs, a client also has the parameters
MIN\_\-TIMEOUT\_\-RECALCULATE and MAX\_\-TIMEOUT\_\-RECALCULATE, which define the
recalculation period. Although the analogous parameters in the OSDs have the same
names, they are different parameters and therefore they all must be defined in
different files.
Finally, it is important to emphasize that after the first boot of the client,
it keeps its coordinates and preserves them across executions, so it remains well
located even if it mounts and unmounts many different volumes or opens and
closes many files. The coordinates are not reinitialized until the client
node is rebooted.
\subsubsection{Replica Selection with Vivaldi}
Up to this point we have introduced a mechanism capable of establishing a
coordinate system in which every node of a network has a pair of coordinates
that allows it to predict the round trip time to its neighbors. Now it
is time to analyze how to take advantage of that information and to describe the
current applications of Vivaldi in XtreemFS.
Sometimes, during the execution of certain operations, the client has to choose
which replica to access, among several replicas stored in different nodes of the
system. The ideal solution is to always select the replica that is stored
in the closest node, so the accesses can be made within the minimum time. The
problem is that, most of the time, measuring the RTT against every OSD for
each selection is not computationally feasible.
Using the coordinates provided by Vivaldi, a client can calculate which replica
is the closest one with a practically insignificant delay. At this point the
only remaining problem seems to be how to gather all the coordinates so that they
are available at the exact moment of the replica selection.
As mentioned earlier, in Vivaldi the coordinates are managed
independently by the node they belong to and remain distributed among the
different nodes of the network. In order to let the client take advantage of
them, it is necessary to collect them in the MRC, so they can be included in
every list of x-locations.
\begin{figure}[t]
\begin{center}
\includegraphics[width=0.60\textwidth]{images/rms2.png}
\caption{Collecting the coordinates}
\label{rms2}
\end{center}
\end{figure}
In figure \ref{rms2} we show the following process:
\begin{enumerate}
\item HeartbeatThread is a component of the OSDs that periodically registers the
OSD in the Directory Service. The information that is uploaded each time is
determined by the function getServiceData, which is defined in the main
constructor of the class OSDRequestDispatcher.
\item OSDStatusManager is a component of the MRC that regularly queries the
Directory Service for available OSDs.
\item During an open operation, the MRC sends to the client a list of x-locations,
so it can locate the different replicas associated with a certain file. The
x-locations include the corresponding coordinates, so the client can use them to
predict the closest replica.
\end{enumerate}
\subsection{Replica creation}
\label{RMS_Replica_Creation}
Another important issue regarding replicas is deciding when and where to create
a replica. For this functionality we have three different mechanisms. The first
one is an explicit request from the user. In this scenario, the RMS will not
take any action. The second one is reactive replica creation: the system
detects that a replica is needed at a given location and starts creating
one. Finally, in the third case, the system predicts the usage of a
file in a location where no replicas are nearby and thus tries to create the
replica before it is used. We call this third mechanism proactive replica
creation.
In both the reactive and the proactive case, we plan to provide a mechanism able to
study the access pattern of files and use it to decide if only a part of the
file needs to be replicated (partial replication). These partial replicas will
speed up the process of replication because only part of the data will need to be
copied to the new location. Nevertheless, if we mispredict the parts of the
replica that will be used, we will always be able to populate the missing parts
on demand (done directly by the OSDs).
%TODO: We could increase the level of detail in following subsubsections
\subsubsection{Reactive replica creation with Vivaldi}
In this scenario, the system detects when replicas are currently needed in other
parts of the Grid. Using the distance mechanisms we just described in
Section \ref{RMS_Choosing_Replicas}, we will detect whether clients request a replica
from large distances. In this case Vivaldi can be used to decide on a better
location for a replica and create it there.
\subsubsection{Proactive replica creation with Oraculo}
We have implemented a service called Oraculo that carries out data mining on
multi-order context models to analyze file-access patterns and to predict
future accesses. For the implementation of such multi-order context models,
Oraculo keeps a trie (or prefix tree) as Kroeger et al. did \cite{kroeger1996pfs,
kroeger2001dai} for centralized environments, where they proved that such structures
are effective.
Thus, each file access is modeled as a symbol (i.e., the file path) and is
recorded as a node of the prefix tree with an associated value that represents
how many times the chain pattern from the root to that node has occurred.
To interact with it, Oraculo provides a basic interface for:
\begin{enumerate}
\item Adding an event to the trie, given a sequence of the last events seen.
\item Getting a prediction from the trie, given a sequence of the last events
seen.
\end{enumerate}
Whenever a file access occurs, it can be reported to Oraculo, which
computes which parts of the trie must be modified, adding the new event to the
corresponding branches or simply increasing the counter of the pattern if it is
already present.
Notice that in order to keep the trie scalable, Oraculo can prune it in two ways.
First, it keeps a predefined maximum number of nodes per branch. Whenever a
new event arrives at a full branch, all its nodes divide their corresponding value
by two, and nodes with a value lower than 1 are deleted from the trie. If
no node has been removed, the new event is not added; the nodes
in the branch nevertheless keep the new value (after the division by two), so in the
near future it will eventually become possible to add new events to that branch.
Second, Oraculo also has a maximum number of root branches (also called
partitions) to preserve the horizontal scalability of the trie. Here we apply an
LRU-based algorithm among the partitions, taking their usage into account as well.
Finally, Oraculo can predict future file accesses by applying basic data-mining
on the trie. It only needs to know some previous accesses to look for patterns
based on them. Then, an OSD could eventually use this information to replicate
data in advance.
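To make the two interface operations above more concrete, the following self-contained sketch shows a counting prefix tree with an add operation and a prediction operation. It is written in Rust purely for illustration and is not the Oraculo implementation; in particular, it leaves out the pruning logic described above.
\begin{verbatim}
use std::collections::HashMap;

// Hypothetical sketch of a multi-order context-model trie: every node
// counts how often the access pattern leading to it has been seen.
#[derive(Default)]
struct TrieNode {
    count: f64,
    children: HashMap<String, TrieNode>,
}

#[derive(Default)]
struct ContextTrie {
    root: TrieNode,
}

impl ContextTrie {
    // Record that `event` followed the given context of recent accesses.
    // Every suffix of the context is updated, so patterns of several
    // orders are learned at once.
    fn add_event(&mut self, context: &[String], event: &str) {
        for start in 0..=context.len() {
            let mut node = &mut self.root;
            for sym in &context[start..] {
                node = node.children.entry(sym.clone()).or_default();
            }
            node.children.entry(event.to_string()).or_default().count += 1.0;
        }
    }

    // Predict the most likely next event given the recent context,
    // backing off to shorter contexts when the longer one is unknown.
    fn predict(&self, context: &[String]) -> Option<String> {
        for start in 0..=context.len() {
            let mut node = &self.root;
            let mut found = true;
            for sym in &context[start..] {
                match node.children.get(sym) {
                    Some(child) => node = child,
                    None => { found = false; break; }
                }
            }
            if found {
                let best = node.children.iter()
                    .max_by(|a, b| a.1.count.partial_cmp(&b.1.count).unwrap());
                if let Some((sym, _)) = best {
                    return Some(sym.clone());
                }
            }
        }
        None
    }
}

fn main() {
    let mut trie = ContextTrie::default();
    let ctx = vec!["a.dat".to_string(), "b.dat".to_string()];
    trie.add_event(&ctx, "c.dat");
    trie.add_event(&ctx, "c.dat");
    trie.add_event(&ctx, "d.dat");
    // Most frequent follower of the context "a.dat b.dat":
    println!("{:?}", trie.predict(&ctx)); // Some("c.dat")
}
\end{verbatim}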
Furthermore, we will propose a decoupled and off-line aggregation of the tries.
Once in a while (how often is still to be determined), OSDs could contact other OSDs (only a
small subset) to exchange their trie information and build an aggregated one
that has the information of all of them. This mechanism will allow all OSDs to have a
more or less global view, because what is learned by one OSD will be propagated
through several aggregations. We have done some preliminary tests using this
mechanism and it seems to work very well in environments of many thousands of
nodes.
Regarding the access patterns of files, in most cases the
information kept by a single OSD will be enough. Nevertheless, whenever we need
the full information about a pattern, we can contact all OSDs that have a replica
and aggregate the learned behavior. As we do not expect to keep many replicas of
a file, this procedure seems reasonable and scalable.
\subsubsection{Integration of Oraculo with OSDs}
Unfortunately, the integration of Oraculo with the OSDs has not been done yet
because we are still evaluating it using GridSim (a well-known Grid
simulator). Once we get significant results from the simulations, we will
evaluate them and port Oraculo to the OSDs.
%%WARNING: This subsection is copy pasted from D3.4.4
%\subsection{Automatic setting of the number of replicas}
%\label{RMS_replica_limitation}
%The problem of having many replicas is that updates imply that a coordination
%mechanisms has to be started. This coordination will reduce performance and the
%magnitude clearly depends on the number of replicas available. For this reason
%we have decided to set a limit in the number of replicas a file will have.
%On the other hand, it is clear that the overhead this coordination will imply
%also depend on the frequency at which files are modified. For instance, if a
%file is only modified once a month (and only small modification are done) we
%could keep many more replicas than for a file that is constantly modified.
%The objective of this mechanism is to detect the access pattern of files and
%find the ratio between reads and writes. With this information the RMS will
%decide the maximum number of replicas that obtains a good tradeoff between the
%benefit of multiple replicas in read operations and the penalty of coordination
%in write operations.
%%WARNING: This subsection is copy pasted from D3.4.4
\subsection{Replica deletion}
\label{RMS_Replica_Deletion}
On the one hand, if we want to be able to replicate files whenever needed but
still respect the maximum number of replicas per file, it would be interesting
to keep the number of replicas a bit smaller than the maximum. This difference
between the maximum and the real number of replicas would allow the system to
create replicas whenever needed. On the other hand, if replicas are not used, it
would also be nice to have them removed automatically to reduce disk usage in a
given node and/or center.
To tackle these two issues we will implement a mechanism that automatically
deletes the least important replicas. To know which replicas are less important we
will use mechanisms similar to the ones used to create replicas. We will
predict future usage using the same kind of tries. In addition, we will perform
a kind of preventive removal of replicas, which means that whenever a node
decides to remove a replica, it will inform the other OSDs that hold it so they can
react accordingly.
%%WARNING: This subsection is copy pasted from D3.4.4
\subsection{Interaction with the Application Execution Management}
\label{RMS_AEM_interaction}
The last mechanism that we will implement to manage replicas consists of an
interaction with the application execution management system (AEM). This
interaction will be done in two steps.
First, the AEM analyzes the JSDL of the application and asks XtreemFS
for the locations (coordinates X,Y) of its files' references. From these, the AEM computes
an optimal coordinate around which the job should be executed. The AEM uses this coordinate
to send a request to the Resource Selection Service (RSS) for a set
of nodes close to it. Of course, the RSS also considers other requirements, such as
CPU or memory, to decide on the resulting set of nodes.
In the second step, the AEM will inform XtreemFS of the final destination of a
given job and the files it will use. With this information, the RMS will decide
whether new replicas need to be created to improve the I/O performance of this job.
In some cases, the RMS may decide to carry out this step in advance, based on
the information obtained in step 1. For instance, this may happen when
the list is made of nodes that are close to each other and one or two
replicas could do the job.
Although this mechanism is very good in the sense that no prediction needs to be
made, it has a couple of limitations. The first one is that the AEM might not
know the files used by a job (this is not a requirement in the job description).
The second one is that there might not be enough time between the moment XtreemFS
receives the execution location of a job (and the files it uses) and the moment
the job starts running. To handle these two cases we have proposed the prediction
mechanisms described above (Section~\ref{RMS_Replica_Creation}).
| {
"alphanum_fraction": 0.8064650678,
"avg_line_length": 53.2777777778,
"ext": "tex",
"hexsha": "c31b14a652d0e0a26c1dfbe6364d440a582fde4b",
"lang": "TeX",
"max_forks_count": 59,
"max_forks_repo_forks_event_max_datetime": "2022-02-22T07:33:13.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-03-07T23:53:19.000Z",
"max_forks_repo_head_hexsha": "e548deba8be2631ab0d2e47a1652941729cf70de",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "stanik137/xtreemfs",
"max_forks_repo_path": "doc/dev-guide/xtreemfs_rms.tex",
"max_issues_count": 84,
"max_issues_repo_head_hexsha": "e548deba8be2631ab0d2e47a1652941729cf70de",
"max_issues_repo_issues_event_max_datetime": "2022-03-10T21:11:13.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-01-05T10:14:40.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "stanik137/xtreemfs",
"max_issues_repo_path": "doc/dev-guide/xtreemfs_rms.tex",
"max_line_length": 88,
"max_stars_count": 270,
"max_stars_repo_head_hexsha": "e548deba8be2631ab0d2e47a1652941729cf70de",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "stanik137/xtreemfs",
"max_stars_repo_path": "doc/dev-guide/xtreemfs_rms.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-14T03:45:59.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-03T22:15:03.000Z",
"num_tokens": 4190,
"size": 19180
} |
\documentclass[output=paper,biblatex,babelshorthands,newtxmath,draftmode,colorlinks,citecolor=brown]{langscibook}
\ChapterDOI{10.5281/zenodo.5599850}
\IfFileExists{../localcommands.tex}{%hack to check whether this is being compiled as part of a collection or standalone
\usepackage{../nomemoize}
\input{../localpackages}
\input{../localcommands}
\input{../locallangscifixes.tex}
\togglepaper[18]
}{}
\author{Manfred Sailer\affiliation{Goethe-Unversität Frankfurt}}
\title{Idioms}
\abstract{This chapter first sketches basic empirical properties of idioms. The state of the art before the emergence of HPSG is presented, followed by a discussion of four types of HPSG approaches to idioms. A section on future research closes the discussion.}
\begin{document}
\maketitle
\label{chap-idioms}
%%% Index cross references
%\is{MWE|see{multiword expression}}
%%%
\section{Introduction}
\label{Sec-Intro}
%\section{Introduction}
In this chapter, I will use the term \emph{idiom} interchangeably with broader terms such as
\emph{phraseme}\is{idiom|(}, \emph{phraseologism}\is{phraseologism|see{idiom}},
\emph{phraseological unit}\is{phraseological unit|see{idiom}}, or \emph{multiword expression}.
This means, that I will subsume under this notion expressions such as prototypical idioms
(\bspT{kick the bucket}{die}), support verb constructions (\bsp{take advantage}), formulaic
expressions (\bsp{Good morning!}), and many more.\footnote{I will provide a paraphrase for all
idioms at their first mention. They are also listed in the appendix, together
with their paraphrase and a remark on which aspects of the idiom are discussed in the text.} The
main focus of the discussion will, however, be on prototypical idioms.
%In the rest of this section,
I will sketch some empirical aspects of idioms in Section~\ref{Sec-EmpiricalDomain}.
%and, then, characterize theoretical issues that arise in the formal modelling of idioms, both in general and with respect to HPSG in particular.
In Section~\ref{Sec-Predecessors}, I will present the theoretical context within which idiom analyses arose in HPSG.
An overview of the development within HPSG will be given in Section~\ref{Sec-Analyses}.
%Section~\ref{Sec-RecentOtherFrameworks} contains a brief sketch of the theoretical development outside HPSG.
Desiderata for future research are mentioned in Section~\ref{Sec-WhereToGo}, before I close with a short conclusion.
%\subsection
\section{Empirical domain}
\label{Sec-EmpiricalDomain}
%Defining the empirical domain of idioms and phraseology. I will aim at a very inclusive definition, i.e., more in lines with ``phraseology'' than with ``idioms'' in the strict sense.
%I will assume the basic characterization of a phraseological unit from \citet{Fleischer97a-u} and \citet{Burger:98} as \emph{complex} units that show at least one of \emph{fixedness}, \emph{(semantic) idiomaticity}, and \emph{lexicalization}.
\is{idiomaticity|(}
In the context of the present handbook, the most useful characterization of idioms might be the
definition of \emph{multiword expression}\is{multiword expression} from \citet[\page 269]{Baldwin:Kim:10}.
%\footnote{See also \citet{Sailer:18SemComp} for a more detailed summary of }
For them, any combination of words counts as a multiword expression if it is syntactically complex
and shows some degree of \emph{idiomaticity} (i.e., irregularity), be it lexical, syntactic,
semantic, pragmatic, or statistical.%
\footnote{In the phraseological tradition, the aspect of \emph{lexicalization} is added
\citep{Fleischer97a-u,Burger:98}. This means that an expression is stored in the lexicon. This
criterion might have the same coverage as \emph{conventionality} used in
\citet[492]{NSW94a}. These criteria are addressing the mental representation of idioms as a
unit and are, thus, rather psycholinguistic in nature.}
%
I speak of a ``combination of words'' in the sense of a \emph{substantive} or \emph{lexically filled idiom}, which
contrasts with \emph{formal} or \emph{lexically open idioms} \citep[505]{FKoC88a}.
\citeauthor{Baldwin:Kim:10}'s criteria can help us structure the data presentation in this section, expanding their criteria where it seems suitable.
My expansions concern the aspect known as \emph{fixedness} in the phraseological tradition as in \citet{Fleischer97a-u}.%
\footnote{\citet{Baldwin:Kim:10} describe idioms in terms of syntactic fixedness, but they seem to consider fixedness a derived notion.}
\begin{sloppypar}
\is{idiomaticity!lexical|(}%
For \citet{Baldwin:Kim:10}, \emph{lexical idiosyncrasy} concerns expressions with words that only
occur in an idiom, so-called \emph{phraseologically bound words}\is{bound word}, or \emph{cranberry
words}\is{cranberry word|see{bound word}} \citep[\page 15]{Aronoff76a-u}. Examples include \bspT{make headway}{make progress},
\bspT{take umbrage}{take offense},
%\bspT{at first blush}{at first sight},
\bspT{in a trice}{in a moment/""very quickly}.%
\footnote{
See https://www.english-linguistics.de/codii/, accessed 2019-09-03, for a list of bound words\is{bound word} in \ili{English} and \ili{German} \citep{Trawinski:al:08lrec}.}
For such expressions, the grammar has to make sure that the bound word does not occur outside the idiom, i.e., we need to prevent combinations such as \refer{trice-ko}.%
\footnote{Tom Wasow (p.c.) points out that there are attested uses of many alleged bound words\is{bound word} outside their canonical idiom, as in \pref{not-a-trice}. Such uses are, however, rare and restricted.
\ea
[]{Not a trice later, the sounds of gunplay were to be heard echoing from Bad Man's Rock. (COCA)\label{not-a-trice}}
\zlast
}
\end{sloppypar}
\eal \label{trice}
\ex []{They fixed the problem in a trice.}
\ex [*]{It just took them a trice to fix the problem.\label{trice-ko}}
\zl
\largerpage[-1]
We can expand this type of idiosyncrasy to include a second important property of idioms.
Most idioms have a fixed inventory of words. In their summary of this aspect of idioms, \citet[\page 827--828]{Gibbs:Colston:07} include the following examples: \bsp{kick the bucket} means `die', but \bsp{kick the pail}, \bsp{punt the bucket}, or \bsp{punt the pail} do not have this meaning. However, some degree of lexical variation seems to be allowed, as
the idiom \bspT{break the ice}{relieve tension in a strained situation} can be varied into \bsp{shatter the ice}.%
\footnote{\label{fn-semmeln}While \citet{Gibbs:Colston:07}, following \citet{Gibbs:al:89}, present this example as a lexical variation, \citew[\page 85]{Glucksberg:01}, from which it is taken, characterizes it as having a somewhat different aspect of an ``abrupt change in the social climate''. Clear cases of synonymy under lexical substitution are found with \ili{German} \bspTL{wie warme Semmeln/Brötchen/Schrippen weggehen}{like warm rolls vanish}{sell like hotcakes} in which some regional terms for rolls can be used in the idiom.}
%For example, \citet[\page 85]{Glucksberg:01} mentions the variation \bsp{shatter the ice} of the idiom \bspT{break the ice}{XXX}.
So, a challenge for idiom theories is to guarantee that the
right lexical elements are used in the right constellation.
\is{idiomaticity!lexical|)}
%\medskip%
\is{idiomaticity!syntactic|(}
\emph{Syntactic idiomaticity} is used in \citet{Baldwin:Kim:10} to describe expressions that are not
formed according to the productive rules of \ili{English} syntax, following \citet{FKoC88a}, such as
\emph{by and large} `on the whole'/""`everything considered' and \bspT{trip the light fantastic}{dance}.
In my expanded use of this notion, syntactic idiomaticity also subsumes
irregularities/""restrictions in the syntactic flexibility of an idiom, i.e., whether an idiom can
occur in the same syntactic constructions as an analogous non"=idiomatic combination. In
Transformational Grammar\is{Transformational Grammar}, such as \citet{Weinreich:69} and
\citet{Fraser:70}, lists of different syntactic transformations were compiled and it was observed
that some idioms allow for certain transformations but not for others. This method has been pursued
systematically in the framework of \emph{Lexicon-Grammar}\is{Lexicon-Grammar} \citep{Gross:82}.%
\footnote{See \citet{Laporte:18} for a recent discussion of applying this method for a classification of idioms.}
%
\citet{SBBCF2002a-u} distinguish three levels of fixedness: \emph{fixed}, \emph{semi-fixed}, and \emph{flexible}.
Completely fixed idioms include \bsp{of course}, \bsp{ad hoc} and are often called \emph{words with spaces}\is{word with spaces}.
Semi-fixed idioms allow for morphosyntactic variation such as inflection. These include some
prototypical idioms (\bsp{trip the light fantastic}, \bsp{kick the bucket}) and complex proper
names. In \ili{English}, semi-fixed idioms show inflection, but cannot easily be passivized, nor do
they allow for parts of the idiom being topicalized or pronominalized, see \refer{kick-ex}.
\eal \label{kick-ex}
\ex[]{Alex kicked / might kick the bucket.}
\ex[*]{The bucket was kicked by Alex.}
\ex[*]{The bucket, Alex kicked.}
\ex[*]{Alex kicked the bucket and Kim kicked it, too.}
\zl
\largerpage[-1]
Flexible idioms pattern with free combinations. For them, we do not only find inflection, but also
passivization, topicalization, pronominalization of parts, etc. Free combinations include some
prototypical idioms (\bspT{spill the beans}{reveal a secret}, \bspT{pull strings}{exert
influence'/""`use one's connections}), but also collocations (\bsp{brush one's teeth}) and light
verbs (\bsp{make a mistake}).
The assumption of two flexibility classes is not uncontroversial:
\citet{Horn:03} distinguishes two types among what \citet{SBBCF2002a-u} consider flexible idioms.
\citet{Fraser:70} assumes six flexibility classes, looking at a wide range of syntactic operations.
\citet{Ruwet:91} takes issue with the cross-linguistic applicability of the classification of
syntactic operations. Similarly, \citet{Schenk:95} claims that for languages such as \ili{Dutch} and
\ili{German}, automatic/meaningless syntactic processes other than just inflection are possible for
semi-fixed idioms, such as verb-second movement\is{word order!V2} and some types of fronting.
The analytic challenge of syntactic idiomaticity is to capture the difference in flexibility in a
non"=ad hoc way. It is this aspect of idioms that has received particular attention in Mainstream
Generative\is{Generative Grammar} Grammar (MGG),\footnote{%
I follow \citet[\page 3]{CJ2005a} in using the term \emph{Mainstream Generative Grammar} to
refer to work in Minimalism and the earlier Government \& Binding framework.}$^,$\footnote{See the
references in \citet{Corver:al:19} for a brief up-to-date overview of MGG work.}
but also in the HPSG approaches sketched in Section~\ref{Sec-Analyses}.
\is{idiomaticity!syntactic|)}
%\medskip%
\is{idiomaticity!semantic|(}
\emph{Semantic idiomaticity} may sound pleonastic, as, traditionally, an expression is called idiomatic if it has a conventional meaning that is different from its literal meaning.
Since I use the terms idiom and idiomaticity in their broad senses of phraseological unit and irregularity, respectively, the qualification \emph{semantic} idiom(aticity) is needed.
One challenge of the modeling of idioms is capturing the relation between the literal and the idiomatic meaning of an expression.
\citet{Gibbs:Colston:07} give an overview of
psycholinguistic research on idioms. Whereas it was first assumed that speakers would compute the literal meaning of an expression and then derive the idiomatic meaning, evidence has been accumulated that the idiomatic meaning is accessed directly.
\citet*{WSN84a-u} and \citet*{NSW94a} explore various semantic relations for idioms, in particular \emph{decomposability}\is{decomposability} and \emph{transparency}\is{transparency}.
An idiom is \emph{decomposable} if its idiomatic meaning can be distributed over its component parts in such a way that we would arrive at the idiomatic meaning of the overall expression if we interpreted the syntactic structure on the basis of such a meaning assignment.
The idiomatic meaning of the expression \bsp{pull strings} can be decomposed by interpreting \bsp{pull} as \bsp{exploit/use} and \bsp{strings} as \bsp{connections}.
%To make this criterion non"=arbitrary, it is now common to require that there be empirical support for such a decomposition. In \ili{English}, we can often insert an adjective
The expressions \bsp{kick the bucket} and \bspT{saw logs}{snore} are not decomposable.
\largerpage
An idiom is \emph{transparent} if there is a synchronically accessible relation between the literal
and the idiomatic meaning of an idiom. For some speakers, \bsp{saw logs} is transparent in this
sense, as the noise produced by this activity is similar to a snoring noise. For \bsp{pull
strings}, there is an analogy to a puppeteer controlling the puppets' behavior by pulling
strings. A non"=transparent idiom is called \emph{opaque}.
Some idioms do not show semantic idiomaticity at all, such as collocations\is{collocation} (\bsp{brush one's teeth}) or support verb constructions (\bsp{take a shower}).
Many body-part expressions such as \bspT{shake hands}{greet} or \bspT{shake one's head}{decline/""negate} constitute a more complex case: they describe a conventionalized activity and denote the social meaning of this activity.%
\footnote{The basic reference for the phraseological properties of body-part expressions is \citew{Burger:76}.}
In addition, we might need to assume a \emph{figurative} interpretation. For some expressions, in particular proverbs\is{proverb} or cases like
\bspT{take the bull by the horns}{approach a problem directly} we might get a figurative reading rather than an idiomatic reading.
%
\citet{Glucksberg:01} explicitly distinguishes between idiomatic and figurative interpretations. In his view, the above-mentioned case of \bsp{shatter the ice} would be a figurative use of the idiom \bsp{break the ice}.
While there has been a considerable amount of work on figurativity in psycholinguistics, the integration of its results into formal linguistics is still a desideratum.%
\is{idiomaticity!semantic|)}
%\medskip%
\is{idiomaticity!pragmatic|(}
\emph{Pragmatic idiomaticity} covers expressions that have a \emph{pragmatic point} in the terminology of \citet{FKoC88a}. These include complex formulaic expressions (\bsp{Good morning!}). There has been little work on this aspect of idiomaticity in formal phraseology.
\is{idiomaticity!pragmatic|)}
%\medskip%
\is{idiomaticity!statistical|(}%
The final type of idiomaticity is \emph{statistical idiomaticity}.
Contrary to the other idiomaticity criteria, this is a usage-based aspect. If we find a high degree
of co-occurrence of a particular combination of words that is idiosyncratic for this combination, we
can speak of a statistical idiomaticity. This category includes
\emph{collocations}\is{collocation}. \citet{Baldwin:Kim:10} mention \bsp{immaculate performance} as
an example. Collocations are important in computational linguistics and in foreign-language
learning, but their status for theoretical linguistics and for a competence-oriented\is{competence}
framework such as HPSG is unclear.%
\is{idiomaticity!statistical|)}
%\bigskip%
\largerpage
This discussion of the various types of idiomaticity shows that idioms do not form a homogeneous empirical domain but rather are defined negatively.
This leads to the basic analytical challenge of idioms: while the empirical domain is defined by absence of regularity in at least one aspect, idioms largely obey the principles of grammar.
In other words, there is a lot of regularity in the domain of idioms, while any approach still needs to be able to model the irregular properties.%
%I have tried to introduce the notions that are most commonly used in HPSG research and to identify the analytical problems related to them.
\is{idiomaticity|)}
%\subsection{Language-theoretical interest in idioms}
%\label{Sec-TheoreticalInterest}
%\begin{itemize}
%\item between rule-based and idiosyncratic
%\item compositional challenge and collocational challenge
%\end{itemize}
\section{Predecessors to HPSG analyses of idioms}
\label{Sec-Predecessors}
In this section, I will sketch the theoretical environment within which HPSG and HPSG analyses of idioms have emerged.
The general assumption about idioms in MGG\is{Generative Grammar} is that they
must be represented as a complex phrasal form-meaning unit.
Such units are inserted \emph{en bloc}\is{en bloc insertion} into the structure rather than built by syntactic operations.
This view goes back to \citet[\page 190]{Chomsky:65}.
With this unquestioned assumption, arguments for or against particular analyses can be constructed.
To give just one classical example, \citet[\page 85]{Chomsky81a} uses the passivizability of some idioms as an argument for the existence of Deep Structure, i.e., a structure on which the idiom is inserted holistically.
%
\citet{Ruwet:91} and \citet{NSW94a} go through a number of such lines of argumentation showing their basic problems.
The holistic view of idioms is most plausible for idioms that show many types of idiomaticity at the same time, but it becomes increasingly problematic if only one or a few types of idiomaticity are attested.
HPSG is less driven by analytical pre-decisions than other frameworks; see
\crossrefchaptert[Section~\ref{minimalism:sec-formalization-exhaustivity}]{minimalism}. Nonetheless, idioms have been used to motivate assumptions about the architecture of linguistic signs in HPSG as well.
\citet{WSN84a-u} and \citet{NSW94a} are probably the two most influential papers in formal phraseology in the last decades.
%These papers have also shaped the analysis of idioms in \emph{Generalized Phrase Structure Grammar} \citep{GKPS85a}\is{Generalized Phrase Structure Grammar} and, consequently in HPSG.
While there are many aspects of \citet{NSW94a} that have not been integrated into the formal modeling of idioms,
there are at least two insights that have been widely adopted in HPSG.
First, not all idioms should be represented holistically.
Second, the syntactic flexibility of an idiom is related to its semantic decomposability.
In fact, \citet{NSW94a} state this last insight even more generally:%
\footnote{Aspects of this approach are already present in \citet{Higgins:74} and \citet{Newmeyer:74}.}
%\ea \label{NSW-quote} \citet[\page 531]{NSW94a}:
\largerpage
\begin{quote}
We predict that the syntactic flexibility of a particular idiom will ultimately be explained in
terms of the compatibility of its semantics with the semantics and pragmatics of various
constructions.
\citep[\page 531]{NSW94a}\label{NSW-quote}
\end{quote}
%\z
%One of the theoretical predecessors of HPSG is \emph{Generalized Phrase Structure Grammar} \citep{GKPS85a}\is{Generalized Phrase Structure Grammar}.
\citet{WSN84a-u} and \citet{NSW94a} propose a simplified first approach to a theory that would be in
line with this quote. They argue that, for \ili{English}, there is a correlation between syntactic
flexibility and semantic decomposability in that non"=decomposable idioms are only semi-fixed,
whereas decomposable idioms are flexible, to use our terminology from
Section~\ref{Sec-EmpiricalDomain}.
\indexgpsgstart
This idea has been directly encoded formally in the idiom theory of
\citet*[Chapter~7]{GKPS85a}, who define the framework of
\emph{Generalized Phrase Structure Grammar}\indexgpsg (GPSG).
\citet{GKPS85a} assume that non"=decomposable idioms are inserted into sentences \emph{en
bloc}\is{en bloc insertion}, i.e., as fully specified syntactic trees which are assigned the
idiomatic meaning holistically. This means that the otherwise strictly context-free grammar of GPSG
needs to be expanded by adding a (small) set of larger trees. Since non"=decomposable idioms are
inserted as units, their parts cannot be accessed for syntactic operations such as passivization or
movement. Consequently, the generalization about semantic non"=decomposability and syntactic
fixedness of \ili{English} idioms from \citet{WSN84a-u} is implemented directly.
Decomposable idioms are analyzed as free combinations in syntax. The idiomaticity of such
expressions is achieved by two assumptions: First, there is lexical ambiguity, i.e., for an idiom
like \bsp{pull strings}, the verb \bsp{pull} has both a literal meaning and an idiomatic
meaning. Similarly for \bsp{strings}. Second, \citet{GKPS85a} assume that lexical items are not
necessarily translated into total functions but can be partial functions. Whereas the literal
meaning of \bsp{pull} might be a total function, the idiomatic meaning of the word would be a
partial function that is only defined on elements that are in the denotation of the idiomatic
meaning of \bsp{strings}. This analysis predicts syntactic flexibility for decomposable idioms, just
as proposed in \citet{WSN84a-u}. \indexgpsgend
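
The partial-function idea can be rendered schematically as follows. The sketch is expository only and is not part of the GPSG fragment: the denotations and names are invented, and the idiomatic meaning of \bsp{pull} is modeled as a function that is defined only on the (invented) denotation of the idiomatic \bsp{strings}.
\begin{verbatim}
# Expository sketch of the partial-function analysis; denotations are invented.

STRINGS_I = {"connection-1", "connection-2"}   # denotation of idiomatic 'strings'

def pull_literal(x):
    """Total function: literal 'pull' combines with any object."""
    return f"pull({x})"

def pull_idiomatic(x):
    """Partial function: defined only on the denotation of idiomatic 'strings'."""
    if x not in STRINGS_I:
        raise ValueError("idiomatic 'pull' is undefined for this argument")
    return f"exploit({x})"

print(pull_literal("rope-1"))           # literal reading: always defined
print(pull_idiomatic("connection-1"))   # idiomatic reading
# pull_idiomatic("rope-1")              # undefined: no idiomatic reading of 'pull the rope'
\end{verbatim}
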
%Related to this general dilemma are two more concrete analytical challenges, which \citet[\page 12]{Bargmann:Sailer:18} call the \emph{compositional challenge} and the \emph{collocational challenge}. The compositional challenge consists in associating a sequence of words with a non"=literal, idiomatic, meaning. The collocational challenge consists in making sure that the components of an idiom all occur together in the right constellation.
%\section{Predecessors to HPSG analyses of idioms}
%\label{Sec-Predecessors}
%\begin{itemize}
%\item Generalized Phrase Structure Grammar \citep{GKPS85a}
%\item Semi-formal, influential papers \citep{WSN84a-u,NSW94a}
%\item Construction Grammar \citep{FKoC88a}
%\end{itemize}
\citet[511--514]{NSW94a} show that the connection between semantic decomposability and syntactic flexibility is not as straightforward as suggested. They say that, in \ili{German} and \ili{Dutch}, ``noncompositional\is{idiom!non-decomposable} idioms are syntactically versatile'' \citep[514]{NSW94a}. Similar observations have been brought forward for \ili{French} in \citet{Ruwet:91}.
\citet{Bargmann:Sailer:18} and \citet{Fellbaum:19} argue that even for \ili{English}, passive examples are attested for non"=decomposable idioms such as \pref{ex-kick-fellbaum}.
\ea
\label{ex-kick-fellbaum}
Live life to the fullest, you never know when the bucket will be kicked.\footnote{
\citew[756]{Fellbaum:19}
}
\z
The current state of our knowledge of the relation between syntactic and semantic idiosyncrasy is that the semantic idiomaticity of an idiom does have an effect on its syntactic flexibility, though the relation is less direct than assumed in the literature based on \citew{WSN84a-u} and \citew{NSW94a}.
\section{HPSG analyses of idioms}
\label{Sec-Analyses}
%\subsection{HPSG-specific research questions}
\largerpage
HPSG does not make a core-periphery\is{core grammar} distinction; see \citet{MuellerKernigkeit}. Consequently, idioms belong to the empirical domain to be covered by an HPSG grammar.
Nonetheless, idioms are not discussed in \citet{ps2} and their architecture of grammar does not have a direct place for an analysis of idioms.%
\footnote{This section follows the basic structure and argument of \citet{Sailer:12} and \citet{Richter:Sailer:14}.}
They situate all idiosyncrasy in the lexicon, which consists of lexical entries for basic words.
Every word has to satisfy a lexical entry and all principles of grammar; see \crossrefchaptert{lexicon}.%
\footnote{I refer to the lexicon\is{lexicon} in the technical sense as the collection of lexical entries, i.e., as \emph{descriptions}, rather than as a collection of lexical items, i.e., linguistic signs.
Since \citet{ps2} do not discuss morphological processes, their lexical entries describe full forms.
If there is a finite number of such lexical entries, the
lexicon can be expressed by a \emph{Word Principle}\is{principle!Word}, a constraint on words that contains a disjunction of all such lexical entries.
Once we include morphology, lexical rules, and
idiosyncratic, lexicalized phrases
in the picture, we need to refine this simplified view.
%
}
%
All properties of a phrase can be inferred from the properties of the lexical items occurring in the phrase and the constraints of grammar.
In their grammar, \citet{ps2} adhere to the \emph{Strong Locality Hypothesis}
(SLH),\is{locality!Strong Locality Hypothesis}\is{locality|(} i.e., all lexical entries describe
leaf nodes in a syntactic structure and all phrases are constrained by principles that only refer to
local (i.e., \type{synsem}) properties of the phrase and to local properties of its immediate
daughters. This hypothesis is summarized in \refer{slh}.
%\vbox{
\eanoraggedright
Strong Locality Hypothesis\label{slh} (SLH)\\
The rules and principles of grammar are statements on a single node of a linguistic structure or on nodes that are immediately dominated by that node.\is{SLH|see{locality!Strong Locality Hypothesis}}
\z
%}
This precludes any purely phrasal approaches to idioms. Following the heritage of GPSG\indexgpsg,
we would assume that all regular aspects of linguistic expressions can be handled by mechanisms that
follow the SLH, whereas idiomaticity would be a range of phenomena that may violate it. It is,
therefore, remarkable that a grammar framework that denies a core-periphery distinction would start
with a strong assumption of locality, and, consequently, of regularity.
\largerpage[1.3]
This is in sharp contrast to the basic motivation of Construction Grammar\indexcxg, which assumes
that constructions can be of arbitrary depth and of an arbitrary degree of idiosyncrasy.
\citet{FKoC88a} use idiom data and the various types of idiosyncrasy discussed in
Section~\ref{Sec-EmpiricalDomain} as an important motivation for this assumption. To contrast this
position clearly with the one taken in \citet{ps2}, I will state the \emph{Strong Non"=locality
Hypothesis}\is{locality!Strong Non"=locality Hypothesis} (SNH) in \refer{snh}.
%\vbox{
\eanoraggedright
Strong Non"=locality Hypothesis\is{locality!Strong Non"=locality Hypothesis} (SNH)\is{SNH|see{locality!Strong Non-locality Hypothesis}}\label{snh}\\
The internal structure of a construction can be arbitrarily deep and show an arbitrary degree of irregularity at any substructure.
\z
%}
The actual formalism used in \citet{ps2} and \citet{King89} -- see
\crossrefchaptert{formal-background} -- does not require the strong versions of the locality and the
non"=locality hypotheses, but is compatible with weaker versions. I will call these the \emph{Weak
Locality Hypothesis} (WLH),\is{locality!Weak Locality Hypothesis} and the \emph{Weak Non"=locality
Hypothesis} (WNH); see \refer{wlh} and \refer{wnh} respectively.
%\vbox{
\eanoraggedright
Weak Locality Hypothesis (WLH)\label{wlh}\\
At most the highest node in a structure is licensed by a rule of grammar or a lexical entry.\is{WLH|see{locality!Weak Locality Hypothesis}}
%The rules and principles of grammar can constrain the internal structure of a linguistic sign at arbitrary depth, but each sign needs to be licensed independently.
\z
%}
According to the WLH, just as in the SLH, each sign needs to be licensed by the lexicon and/""or the grammar.
This precludes any \emph{en bloc}-insertion analyses, which would be compatible with the SNH.
%
According to the WNH, in contrast to the SLH, a sign can, however, impose further constraints on its
component parts that may go beyond local (i.e., \type{synsem}) properties of its immediate
daughters.
\eanoraggedright
Weak Non"=locality Hypothesis\is{locality!Weak Non-locality Hypothesis} (WNH)\is{WNH|see{locality!Weak Non-locality Hypothesis}}\label{wnh}\\
The rules and principles of grammar can constrain -- though not license -- the internal structure of a linguistic sign at arbitrary depth.
\z
This means that all substructures of a syntactic node need to be licensed by the grammar, but the
node may impose idiosyncratic constraints on which particular well-formed substructures it may
contain.
%\bigskip%
In this section, I will review four types of analyses developed within HPSG in a mildly
chronological order: First, I will discuss a conservative extension of \citet{ps2} for idioms
\citep{KE94a} that sticks to the SLH. Then, I will look at attempts to incorporate constructional
ideas more directly, i.e., ways to include a version of the SNH. The third type of approach will
exploit the WLH. Finally, I will summarize recent approaches, which again emphasize the
locality of idioms.
\is{locality|)}
\subsection{Early lexical approaches}
\label{Sec-EarlyLexical}
\largerpage[1.3]
\citet{KE94a}, based on \citet{Erbach92a}, present the first comprehensive HPSG account of idioms.
They look at a wide variety of different types of \ili{German} idioms, including support verb
constructions. They only modify the architecture of \citet{ps2} marginally and stick to the Strong
Locality Hypothesis. They base their analysis on the apparent correlation between syntactic
flexibility and semantic decomposability from \citet{WSN84a-u} and \citet{NSW94a}. Their analysis
is a representational variant of the analysis in \citet{GKPS85a}.
To maintain the SLH, \citet{KE94a} assume that the information available in syntactic selection is
slightly richer than what has been assumed in \citet{ps2}: first, they use a lexeme-identification
feature, \attrib{lexeme}, which is located inside the \attrib{index} value and whose value is the
semantic constant associated with a lexeme. Second, they include a feature \feat{theta-role}, whose
value indicates which thematic role a sign is assigned in a structure. In addition to standard
thematic roles, they include a dummy value \type{nil}. Third, as the paper was written in the
transition phase between \citet{ps} and \citet{ps2}, they assume that the selectional attributes
contain complete \type{sign} objects rather than just \type{synsem} objects. Consequently,
selection for phonological properties and internal constituent structure is possible, which we could
consider a violation of the SLH.
The effect of these changes in the analysis of idioms can be seen in \refer{ke-spill} and \refer{ke-kick}. In \refer{ke-spill}, I sketch the analysis of the syntactically flexible, decomposable idiom \emph{spill the beans}. There are individual lexical items for the idiomatic words.%
\footnote{We do not need to specify the \attrib{rel} value for the noun \emph{beans}, as the \attrib{lexeme} and the \attrib{rel} value are usually identical.}
\eal % Analysis of \emph{spill the beans} in the spirit of \citet{KE94a}
\label{ke-spill}
\ex
\label{le-idiomatic-spill}
\avm{
[phon < \phonfont{spill} > \\
synsem|loc [cat & [subj & < NP >\\
comps & < NP, NP[lexeme & beans\_i] >] \\
cont & [rel & spill\_i] ] ]
}
\ex
\avm{
[phon < \phonfont{beans} > \\
synsem|loc|content|index|lexeme \type{beans\_i} ]
}
\zl
\noindent
The \attrib{lexeme} values of these words can be used to distinguish them from their ordinary,
non"=idiomatic homonyms. Each idiomatic word comes with its idiomatic meaning, which models the
decomposability of the expression. For example, the lexical items satisfying the entry in
\refer{le-idiomatic-spill} can undergo lexical rules such as passivization.
\largerpage[1]
The idiomatic verb \emph{spill} selects an NP complement with the \attrib{lexeme} value
\type{beans\_i}. The lexicon is built in such a way that no other word selects for this
\attrib{lexeme} value. This models the lexical fixedness of the idiom.
The choice of putting the lexical identifier into the \attrib{index} guarantees that it is shared
between a lexical head and its phrase, which allows for syntactic flexibility inside the NP.
Similarly, the information shared between a trace and its antecedent contains the \attrib{index}
value. Consequently, participation in unbounded dependency constructions is equally accounted for.
Finally, since a pronoun has the same \attrib{index} value as its antecedent, pronominalization as in
(\ref{ex-pron-spill}) is also possible.
\ea
\label{ex-pron-spill}
Eventually, she spilled all the beans$_i$. But it took her a few days to spill them$_i$
all.\footnote{
\citew[207]{Riehemann2001a}
}
\z
I sketch the analysis of a non"=decomposable, fixed idiom, \emph{kick the bucket}, in
\refer{ke-kick}. In this case, there is only a lexical entry of the syntactic head of the idiom,
the verb \emph{kick}. It selects the full \isi{phonology} of its complement. This blocks any syntactic
processes inside this NP. It also follows that the complement cannot be realized as a trace, which
blocks extraction.\footnote{See \crossrefchaptert{udc} for
details on the treatment of extraction in HPSG.}
%
The special \feat{theta-role} value \type{nil} will be used to restrict the lexical rules that can
be applied. The passive lexical rule, for example, would be specified in such a way that it cannot
apply if the NP complement in its input has this theta-role.
\ea % Analysis of \emph{kick the bucket} in the spirit of \citet{KE94a}
\label{ke-kick}
\avm{
[phon < \phonfont{kick} > \\
synsem|loc [cat & [subj & < NP > \\
comps & < NP[\punk{phon}{\phonliste{ the, bucket }} \\
theta-role & nil] > ] \\
cont & [rel & die] ] ]
}
\z
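
The contrast between the two selection strategies can be illustrated schematically. The following sketch is not part of the original proposal; the attribute names and data structures are invented and merely mimic the AVMs in \refer{ke-spill} and \refer{ke-kick}.
\begin{verbatim}
# Schematic contrast between lexeme-based and phonology-based selection
# (data structures and names are invented for illustration).

flexible_entry = {                        # idiomatic 'spill'
    "comps": [{"lexeme": "beans_i"}],     # selects a LEXEME value: the NP may be
}                                         # modified, passivized, pronominalized, ...

fixed_entry = {                           # idiomatic 'kick'
    "comps": [{"phon": ("the", "bucket"),     # selects a full PHON list: no internal
               "theta_role": "nil"}],         # variation; THETA-ROLE nil blocks
}                                             # lexical rules such as passivization

def selects(entry, candidate_np):
    """Does the candidate NP satisfy the verb's complement description?"""
    wanted = entry["comps"][0]
    return all(candidate_np.get(attr) == value for attr, value in wanted.items())

print(selects(flexible_entry,
              {"lexeme": "beans_i", "phon": ("the", "wretched", "beans")}))     # True
print(selects(fixed_entry,
              {"phon": ("the", "proverbial", "bucket"), "theta_role": "nil"}))  # False
\end{verbatim}
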
With this analysis, \citet{KE94a} capture both the idiosyncratic aspects and the regularity of idioms.
They show how it generalizes to a wide range of idiom types.
I will briefly mention some problems of the approach, though.
%There are, however, a number of problems. I will just mention few of them here.
There are two problems for the analysis of non"=decomposable idioms. First, the approach is too
restrictive with respect to the syntactic flexibility of \emph{kick the bucket}, as it excludes
cases such as \emph{kick the social/figurative bucket}, which are discussed in \citet{Ernst:81}.
Second, it is built on equating the class of non"=decomposable idioms with that of semi-fixed
idioms. As shown in my discussion around example \pref{ex-kick-fellbaum}, this cannot be maintained.
\largerpage
There are also some undesired properties of the \textsc{lexeme} value selection. The index identity
between a pronoun and its antecedent would require that the subject of the relative
clause\is{relative clause} in \refer{strings-relcl} has the same \attrib{index} value as the head noun
\emph{strings}. However, the account of the lexical fixedness of idioms is built on the assumption
that no verb except for the idiomatic \emph{pull} selects for an argument with \attrib{lexeme} value
\type{strings\_i}.\footnote{\citet{Pulman:93} discusses an analogous problem for the denotational
theory of \citet{GKPS85a}.}
\ea \label{strings-relcl}
Parky pulled the strings that got me the job.
\citep[137]{McCawley:81}
\z
Notwithstanding these problems, the analytic ingredients of \citet{KE94a} constitute the basis of
later HPSG analyses. In particular, a mechanism for lexeme-specific selection has been widely
assumed in most approaches. The attribute \feat{theta-role} can be seen as a simple form of an
\emph{inside-out} mechanism\is{inside-out constraint}, i.e., as a mechanism of encoding information
about the larger structure within which a sign appears.
%
%\citet{SS2003a}: inside out,
%\citet{Sag2012a}
\subsection{Phrasal approach}
\label{Sec-Phrasal}
\largerpage[1]
\is{Constructional HPSG|(}%
With the advent of constructional analyses within HPSG, starting with
\citet{Sag97a}, it is natural to expect phrasal accounts of idioms to emerge as well, as
idiomaticity is a central empirical domain for Construction Grammar\indexcxg; see
\crossrefchaptert{cxg}. In this version of HPSG, there is an elaborate type hierarchy below
\type{phrase}. \citet{Sag97a} also introduces \emph{defaults}\is{default} into HPSG, which play an
important role in the treatment of idioms in Constructional HPSG. The clearest phrasal approach to
idioms can be found in \citet{Riehemann2001a}, which incorporates insights from earlier publications
such as \citet{Riehemann97a} and \citet{RB99a}.
%
The overall framework of \citet{Riehemann2001a} is Constructional HPSG with \emph{Minimal Recursion Semantics}\indexmrs \citep{CFMRS95a-u,CFPS2005a}; see also \crossrefchaptert[Section~\ref{semantics:sec-mrs}]{semantics}.
\begin{sloppypar}
For Riehemann, idioms are phrasal units. Consequently, she assumes a subtype of \type{phrase} for each idiom, such as \type{spill-beans-idiomatic-phrase} or \type{kick-bucket-idiomatic-phrase}. The proposal in \citet{Riehemann2001a} is simultaneously phrasal and obeys the SLH. To achieve this, \citet{Riehemann2001a} assumes an attribute \attrib{words}, whose value contains all words dominated by a phrase. This makes it possible to say that a phrase of type \type{spill-beans-idiomatic-phrase} dominates the words \emph{spill} and \emph{beans}. This is shown in the relevant type constraint for the idiom \emph{spill the beans} in \refer{sr-spillbeans}.%
\footnote{The percolation mechanism for the feature \attrib{words} is rather complex.
%In fact, in \citew[Section 5.2.1]{Riehemann2001a} the idiom-specific words appear within a \feat{c-words} value and the other words are dominated by the idiomatic phrase in the value of an attribute \feat{other-words}, both of which together form the value of \attrib{words}. While all the values of these features are subject to local percolation principles,
The fact that entire words are percolated undermines the locality intuition behind the SLH.
}
\end{sloppypar}
\vbox{
\ea Constraint on the type \type{spill-beans-idiomatic-phrase} from \citet[185]{Riehemann2001a}:\label{sr-spillbeans}\\
\avm{
[\type*{spill-beans-ip}
words & \{ [\type*{i\_spill}
\ldots{} liszt & <[\type*{i\_spill\_rel}
undergoer & \1]> ] \srdefault
[\ldots{} liszt & <\type{\_spill\_rel}>],\\
[\type*{i\_beans}
\ldots{} liszt & <[\type*{i\_beans\_rel}
inst & \1]> ] \srdefault
[\ldots{} liszt & <\type{\_beans\_rel}>], \ldots{}
\}
]
}
\z
}
\noindent
The \attrib{words} value of the idiomatic phrase contains at least two elements, the idiomatic words
of type \type{i\_spill} and \type{i\_beans}. The special symbol {\scriptsize \srdefault} used in
this constraint expresses a default\is{default}. It says that the idiomatic version of the word
\emph{spill} is just like its non"=idiomatic homonym, except for the parts specified in the
left"=hand side of the default. In this case, the type of the words and the type of the semantic
predicate contributed by the words are changed. \citet{Riehemann2001a} only has to introduce the
types for the idiomatic words in the type hierarchy but need not specify type constraints on the
individual idiomatic words, as these are constrained by the default statement within the constraints
on the idioms containing them.
\largerpage
As in the account of \citet{KE94a}, the syntactic flexibility of the idiom follows from its free
syntactic combination and the fact that all parts of the idiom are assigned an independent semantic
contribution. The lexical fixedness is a consequence of the requirement that particular words are
dominated by the phrase, namely the idiomatic versions of \emph{spill} and \emph{beans}.
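
The effect of the \attrib{words}-based constraint can be sketched as follows: a phrase of the relevant idiomatic type is only well-formed if the required idiomatic words are among the words it dominates. The sketch is expository only; the tree encoding is invented and abstracts away from the actual percolation mechanism behind the constraint in \refer{sr-spillbeans}.
\begin{verbatim}
# Expository sketch of the WORDS-based idiom constraint; the tree encoding
# (nested tuples of word strings) is invented for illustration.

def dominated_words(node):
    """Collect all words (leaves) dominated by a phrase node."""
    if isinstance(node, str):
        return {node}
    words = set()
    for daughter in node:
        words |= dominated_words(daughter)
    return words

def is_spill_beans_ip(node):
    """Simplified constraint on the type spill-beans-idiomatic-phrase."""
    return {"i_spill", "i_beans"} <= dominated_words(node)

print(is_spill_beans_ip(("i_spill", ("the", "i_beans"))))   # True
print(is_spill_beans_ip(("i_spill", ("the", "rumor"))))     # False
\end{verbatim}
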
The appeal of the account is particularly clear in its application to non"=de\-com\-posable,
semi-fixed idioms such as \emph{kick the bucket} \citep[\page 212]{Riehemann2001a}. For such
expressions, the idiomatic words that constitute them are assumed to have an empty semantics and the
meaning of the idiom is contributed as a constructional semantic contribution only by the idiomatic
phrase. Since the \attrib{words} list contains entire words, it is also possible to require that the
idiomatic word \emph{kick} be in active voice and/""or that it take a complement compatible with the
description of the idiomatic word \emph{bucket}. This analysis captures the syntactically regular
internal structure of this type of idiom and is compatible with the occurrence of modifiers such as
\emph{proverbial}. At the same time, it prevents passivization and excludes extraction of the
complement as the \synsemv of the idiomatic word \emph{bucket} must be on the \compsl of the
idiomatic word \emph{kick}.\footnote{%
This assumes that extracted elements are not members of the valence lists. See
\crossrefchapterw[\pageref{udc:ex-slashed-verb-traceless}]{udc} for details.
}
Riehemann's approach clearly captures the intuition of idioms as phrasal units much better than any
other approach in HPSG. However, it faces a number of problems. First, the integration of the
approach with Constructional HPSG is done in such a way that the phrasal types for idioms are
cross-classified in complex type hierarchies with the various syntactic constructions in which the
idiom can appear. This allows Riehemann to account for idiosyncratic differences in the syntactic
flexibility of idioms, but the question is whether such an explicit encoding misses generalizations
that should follow from independent properties of the components of an idiom and/""or of the
syntactic construction -- in line with the quote from \citet{NSW94a} on page \pageref{NSW-quote}.
Second, the mechanism of percolating dominated words to each phrase is not compatible with the
intuitions of most HPSG researchers. Since no empirical motivation for such a mechanism aside from
idioms is provided in \citet{Riehemann2001a}, this idea has not been pursued in other papers.
Third, the question of how to block the free occurrence of idiomatic words, i.e., the occurrence of
an idiomatic word without the rest of the idiom, is not solved in \citet{Riehemann2001a}. While the
idiom requires the presence of particular idiomatic words, the occurrence of these words is not
restricted.\footnote{Since the problem of free occurrences of idiomatic words is not an issue for
parsing, versions of Riehemann's approach have been integrated into practical parsing systems
\citep{Villavicencio:Copestake:02}; see \crossrefchaptert{cl}. Similarly, the approach to idioms
sketched in \citet{Flickinger:15Slides2} is part of a system for parsing and machine
translation\is{machine translation}. Idioms in the source language are identified by bits of
semantic representation -- analogous to the elements in the \attrib{words} set. This approach,
however, does not constitute a theoretical modeling of idioms; it does not exclude ill-formed uses
of idioms but identifies potential occurrences of an idiom in the output of a parser.} Note that
idiomatic words may sometimes be found without the other elements of the idiom -- as evidenced by
expressions such as \emph{bucket list} `list of things to do before one dies'. Such data may be
considered support for Riehemann's approach; however, the extent to which we find such free
occurrences of idiomatic words is extremely limited.\footnote{See the discussion around \pref{trice}
for a parallel situation with bound words.}
Before closing this subsection, I would like to point out that \citet{Riehemann2001a} and
\citet{RB99a} are the only HPSG papers on idioms that address the question of statistical
idiomaticity\is{idiomaticity!statistical}, based on the variationist study in \citet{Bender2000a}.
In particular, \citet[\page 297--301]{Riehemann2001a} proposes phrasal constructions for
collocations even if these do not show any lexical, syntactic, semantic, or pragmatic idiosyncrasy
but just a statistical co-occurrence preference. She extends this into a larger plea for an
\emph{experience-based HPSG}\is{experience-based HPSG}.
%
\citet{Bender2000a} discusses the same idea under the notions of \emph{minimal} versus
\emph{maximal} grammars, i.e., grammars that are as free of redundancy as possible to capture the
grammatical sentences of a language with their correct meaning versus grammars that might be open to
a connection with usage-based approaches\is{usage-based grammar} to language modeling. \citet[\page
292]{Bender2000a} sketches a version of HPSG with frequencies/probabilities attached to lexical and
phrasal types.\footnote{An as-yet unexplored solution to the problem of free occurrence of idiomatic
words within an experience-based version of HPSG could be to assign the type \type{idiomatic-word}
an extremely low probability of occurring. This might have the effect that such a word can only be
used if it is explicitly required in a construction. However, note that neither
defaults\is{default} nor probabilities are a well-defined part of the formal foundations of
theoretical work on HPSG; see \crossrefchaptert{formal-background}.}\is{Constructional HPSG|)}
\subsection{Mixed lexical and phrasal approaches}
\label{Sec-Mixed}
While \citet{Riehemann2001a} proposes a parallel treatment of decomposable and non"=decomposable
idioms -- and of flexible and semi-fixed idioms -- the division between fixed and non"=fixed
expressions is at the core of another approach, the \emph{two-dimensional theory of
idioms}\is{two-dimensional theory of idioms}. This approach was first outlined in
\citet{Sailer2000a} and referred to under this label in
\citet{Richter:Sailer:09,Richter:Sailer:14}. It is intended to combine constructional and
collocational approaches to grammar.
The basic intuition behind this approach is that signs have internal and external properties. All
properties that are part of the feature structure of a sign are called \emph{internal}. Properties
that relate to larger feature structures containing this sign are called its \emph{external}
properties. The approach assumes that there is a notion of \emph{regularity} and that anything
diverging from it is \emph{idiosyncratic} -- or idiomatic, in the terminology of this chapter.
This approach is another attempt to reify the GPSG\indexgpsg analysis
within HPSG. \citet{Sailer2000a} follows \citet{NSW94a} in distinguishing between non"=decomposable,
non"=flexible idioms on the one hand and decomposable, flexible idioms on the other. The
first group is considered internally irregular and receives a constructional analysis in terms of a
\emph{phrasal lexical entry}\is{phrasal lexical entry}. The second group is considered to consist of
independent, smaller lexical units that show an external irregularity in being constrained to
co-occur within a larger structure. Idioms of the second group receive a collocational
analysis. The two types of irregularity are connected by the \emph{Predictability Hypothesis}, given
in \refer{PredHypo}.
\vbox{
\eanoraggedright
\label{PredHypo}
Predictability Hypothesis\is{Predictability Hypothesis} \citep[\page 366]{Sailer2000a}:\\
For every sign whose internal properties are fully predictable, the distributional
behavior of this sign is fully predictable as well.
\z
}
\noindent
In the most recent version of this approach, \citet{Richter:Sailer:09,Richter:Sailer:14}, there is a
feature \attrib{coll} defined on all signs. The value of this feature specifies the type of internal
irregularity. The authors assume a cross-classification of regularity and irregularity with respect
to syntax, semantics, and phonology -- ignoring pragmatic and statistical (ir)regularity in their
paper. Every basic lexical entry is defined as completely irregular, as its properties are not
predictable. Fully regular phrases such as \emph{read a book} have a trivial value of \attrib{coll}.
A syntactically internally regular but fixed idiom such as \emph{kick the bucket} is classified as
having only semantic irregularity, whereas a syntactically irregular expression such as \emph{trip
the light fantastic} is of an irregularity type that is a subsort of syntactic and semantic
irregularity, but not of phonological irregularity. Following the terminology of \citet{FKoC88a},
this type is called \type{extra-grammatical-idiom}.
%
The phrasal lexical entry for \emph{trip the light fantastic} is sketched in \refer{rs-trip},
adjusted to the feature geometry of \citet{Sag97a}.
\vbox{
\ea Phrasal lexical entry for the idiom \emph{trip the light fantastic}:\label{rs-trip}\\
% todo avm
\avm{
[\type*{headed-phrase}
phon \1 \+ <\type{the, light, fantastic}> \\
synsem|loc [ cat & [ head & \2 \\
listeme & trip-the-light-fantastic \\
subj & < \3 [\ldots{}{} index & \4] > \\
comps & < > ] \\
cont & [\type*{trip-light-fant}
dancer & \4] ] \\
head-dtr [phon \1 \\
synsem|loc [ cat & [ head & \2 verb \\
listeme & trip \\
subj & < \3 > \\
comps & < > ] ] ] \\
coll \type{extra-grammatical-idiom}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ]
}
\z
}
In \refer{rs-trip}, the constituent structure of the phrase is not specified, but the phonology is
fixed, with the exception of the head daughter's phonological contribution. This accounts for the
syntactic irregularity of the idiom. The semantics of the idiom is not related to the semantic
contributions of its components, which accounts for the semantic idiomaticity.
\citet{Soehn2006a} applies this theory to \ili{German}\il{German}. He solves the problem of the
relatively large degree of flexibility of non"=decomposable idioms in \ili{German} by using
underspecified descriptions of the constituent structure dominated by the idiomatic phrase.
\largerpage
For decomposable idioms, the two-dimensional theory assumes a collocational component. This
component is integrated into the value of an attribute \attrib{req}, which is only defined on
\type{coll} objects of one of the irregularity types. This encodes the Predictability Hypothesis.
%
The most comprehensive version of this collocational theory is given in \citet{Soehn:09},
summarizing and extending ideas from \citet{Soehn2006a} and \citet{richter-soehn:2006}. Soehn
assumes that collocational requirements can be of various types: a lexical item can be constrained
to co-occur with particular \emph{licensers} (or collocates). These can be other lexemes, semantic
operators, or phonological units. In addition, the domain within which this licensing has to be
satisfied is specified in terms of syntactic barriers, i.e., syntactic nodes dominating the
externally irregular item.
To give an example, the idiom \emph{spill the beans} would be analyzed as consisting of two
idiomatic words \emph{spill} and \emph{beans} with special \attrib{listeme} values \type{spill-i} and
\mbox{\type{beans-i}}. The idiomatic verb \emph{spill} imposes a lexeme selection on its
complement. The idiomatic noun \emph{beans} has a non"=empty \attrib{req} value, which specifies that
it must be selected by a word with \attrib{listeme} value \type{spill-i} within the smallest complete
clause dominating it.
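
The collocational component can be illustrated schematically: an externally irregular item carries a requirement that must be satisfied by a licenser within a specified syntactic domain. The following sketch is expository only; the tree encoding and all names are invented and do not reflect the actual formalization in \citet{Soehn:09}.
\begin{verbatim}
# Expository sketch of a collocational licensing check; the tree encoding
# and all names are invented.

class Node:
    def __init__(self, listeme=None, req=None, is_clause=False, children=()):
        self.listeme = listeme      # lexical identifier (None for phrases)
        self.req = req              # required licenser (None if unconstrained)
        self.is_clause = is_clause  # does this node count as a complete clause?
        self.children = list(children)
        self.parent = None
        for child in self.children:
            child.parent = self

    def words(self):
        if not self.children:
            yield self
        for child in self.children:
            yield from child.words()

def licensed(word):
    """Is the word's requirement met in the smallest clause dominating it?"""
    if word.req is None:
        return True
    domain = word
    while domain.parent is not None and not domain.is_clause:
        domain = domain.parent
    return any(w.listeme == word.req for w in domain.words())

# 'spill the beans': idiomatic 'beans' requires a 'spill-i' licenser in its clause.
beans = Node(listeme="beans-i", req="spill-i")
clause = Node(is_clause=True,
              children=[Node(listeme="spill-i"), Node(listeme="the"), beans])
print(licensed(beans))   # True; in a clause without 'spill-i' it would be False
\end{verbatim}
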
%\citet{Richter:Sailer:09,Richter:Sailer:14} look at idioms with
%\bigskip
The two-dimensional approach suffers from a number of weaknesses. First, it presupposes a notion of
regularity. This assumption is not shared by all linguists. Second, the criteria for whether an
expression should be treated constructionally or collocationally are not always clear. Idioms with
irregular syntactic structure need to be analyzed constructionally, but this is less clear for
non"=decomposable idioms with regular syntactic structure such as \emph{kick the bucket}.
%\begin{itemize}
%\item \citet{Sailer2000a}, \citet{Soehn2006a}, \citet{Richter:Sailer:09}
%\end{itemize}
\subsection{Recent lexical approaches}
\label{Sec-RecentLexical}\label{idioms:recent-lexical-approaches}
\largerpage
\enlargethispage{3pt}
\citet{KSF2015a} marks an important re-orientation in the analysis of idioms: the lexical analysis
is extended to all syntactically regular idioms, i.e., to both decomposable (\emph{spill the beans})
and non"=decomposable idioms (\emph{kick the bucket}).\footnote{This idea has been previously
expressed within a Minimalist perspective by \citew{Everaert:10} and G.\ \citew[\page
213--214]{GMueller2011a}. }
%\citet{KSF2015a} use Sign-based Construction Grammar\is{Sign-based Construction Grammar}
%
\citet{KSF2015a} achieve a lexical analysis of non"=decomposable idioms by two means: (i), an
extension of the HPSG selection mechanism, and (ii), the assumption of semantically empty idiomatic
words.
As in previous accounts, the relation among idiom parts is established through lexeme-specific
selection, using a feature \attrib{lid} (for \emph{lexical identifier}). The authors assume that
there is a difference between idiomatic and non"=idiomatic \attrib{lid} values. Only heads that are
part of idioms themselves can select for idiomatic words.
%Quote: Ordinary, non"=idiom predicators are lexically specified as requiring all members of their VAL list to be nonidiomatic.
For the idiom \emph{kick the bucket}, \citet{KSF2015a} assume that all meaning is carried by the
lexical head, an idiomatic version of \emph{kick}, whereas the other two words, \emph{the} and
\emph{bucket} are meaningless. This meaninglessness allows \citeauthor{KSF2015a} to block the idiom from
occurring in constructions which require meaningful constituents, such as questions,
\emph{it}-clefts, middle voice, and others. To exclude passivization, the authors assume that the
\ili{English} passive cannot apply to verbs selecting a semantically empty direct object.
The approach in \citet{KSF2015a} is a recent attempt to maintain the SLH as much as possible. Since
the SLH has been a major conceptual motivation for Sign-Based Construction Grammar\indexsbcg, \citeauthor{KSF2015a}'s paper is an important contribution showing the
empirical robustness of this assumption.
%\medskip%
\citet{Bargmann:Sailer:18} propose a similar lexical approach to non"=de\-com\-pos\-able idioms.
They take as their starting point the syntactic flexibility of semantically non"=decomposable idioms
in \ili{English} and, in particular, in \ili{German}. There are two main differences between
\citeauthor{KSF2015a}'s paper and \citeauthor{Bargmann:Sailer:18}'s: (i),
\citeauthor{Bargmann:Sailer:18} assume a collocational rather than a purely selectional mechanism to
capture lexeme restrictions of idioms, and (ii), they propose a redundant semantics rather than an
empty semantics for idiom parts in non"=decomposable idioms. In other words,
\citet{Bargmann:Sailer:18} propose that both \emph{kick} and \emph{bucket} contribute the semantics
of the idiom \emph{kick the bucket}. \citeauthor{Bargmann:Sailer:18} argue that the semantic
contributions of parts of non"=decomposable, syntactically regular idioms are the same across
languages, whereas the differences in syntactic flexibility are related to the different syntactic,
semantic, and pragmatic constraints imposed on various constructions. To give just one example,
while there are barely any restrictions on passive subjects in \ili{German}, there are strong
discourse-structural constraints on passive subjects in \ili{English}.
\largerpage[1.3]
Both \citet{KSF2015a} and \citet{Bargmann:Sailer:18} attempt to derive the (partial) syntactic
inflexibility of non"=decomposable idioms from independent properties of the relevant constructions.
As such, they subscribe to the programmatic statement of \citet{NSW94a} quoted on page
\pageref{NSW-quote}. In this respect, the extension of the lexical approach from decomposable
idioms to all syntactically regular expressions has been a clear step forward.
%\bigskip%
\citet{Findlay:17} provides a recent discussion and criticism of lexical approaches to idioms in
general, which applies in particular to non"=decomposable expressions. His reservations comprise
the following points. First, there is a massive proliferation of lexical entries for otherwise
homophonous words.
%Is is unclear, for example, if a separate definite article is needed for each idiom which contains one, i.e., it might turn out that we need different lexical entries for the word \emph{the} in \emph{kick the bucket}, \emph{shoot the breeze}, and \emph{shit hits the fan}.%
%
Second, the lexical analysis does not represent idioms as units, which might make it difficult to
connect their theoretical treatment with processing evidence. Findlay refers to psycholinguistic
studies, such as \citet{Swinney:Cutler:79}, that point to a faster processing of idioms than of free
combinations. While the relevance of processing arguments for an HPSG analysis is not clear, I
share the basic intuition that idioms, decomposable or not, form a unit and that this should be part
of their linguistic representation.
\section{Where to go from here?}
\label{Sec-WhereToGo}
The final section of this chapter contains short overviews of research that has been done in areas
of phraseology that lie outside its main thread. I will also identify desiderata.
%In this final section, I would like to point to directions for future research.
\subsection{Neglected phenomena}
\label{Sec-Neglected}
Not all types of idioms or idiomaticity mentioned in Section~\ref{Sec-EmpiricalDomain} have received
an adequate treatment in the (HPSG) literature. I will briefly look at three empirical areas that
deserve more attention: neglected types of idiom variation, phraseological patterns, and the literal
and non"=literal meaning components of idioms.
%\bigskip%Variation
Most studies on idiom variation have looked at verb- and sentence-related syntactic constructions,
such as passive and topicalization. However, not much attention has been paid to lexical variation
in idioms. This variation is illustrated by the following examples from \citet[\page 184,
191]{Richards:01}.
\eal \label{creeps}
\ex The Count gives everyone the creeps.
\ex You get the creeps (just looking at him).
\ex I have the creeps.
\zl
\noindent
In~\refer{creeps}, the alternation of the verb seems to be very systematic -- and has been used by
\citet{Richards:01} to motivate a lexical decomposition of the involved verbs. A similar argument has been
made in \citet{Mateu:Espinal:07} for similar idioms in \ili{Catalan}. We still lack systematic,
larger-scale empirical studies of this type of substitution, and it would be important to see how it can
be modeled in HPSG. One option would be to capture the \emph{give}--\emph{get}--\emph{have}
alternation(s) with lexical rules. Such lexical rules would be different from the standard cases,
however, as they would change the lexeme itself rather than just altering its morpho-syntactic
properties or its semantic contribution.
In the case mentioned in footnote~\ref{fn-semmeln}, the alternation consists of substituting a word
with a (near) synonym and keeping the meaning of the idiom intact. Again, HPSG seems to have all the
required tools to model this phenomenon -- for example, by means of hierarchies of
\textsc{lid} values. However, the extent of this phenomenon across the set of idioms is not
known empirically.
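
To illustrate the idea, the following sketch treats near-synonym substitution as subsumption under a shared supertype of \textsc{lid} values. The hierarchy, the ASCII spellings, and all names are invented for the \emph{Semmeln}/\emph{Brötchen}/\emph{Schrippen} case and are not drawn from any published analysis.
\begin{verbatim}
# Illustrative sketch: the idiom constrains its nominal slot to a supertype
# of LID values, and the interchangeable near-synonyms are its subtypes.

LID_SUPERTYPE = {
    "semmeln": "bread-roll",
    "broetchen": "bread-roll",
    "schrippen": "bread-roll",
    "kuchen": "cake",
}

def satisfies(lid, required_supertype):
    """Does a word's LID value fall under the supertype required by the idiom?"""
    return LID_SUPERTYPE.get(lid) == required_supertype

# 'wie warme X weggehen' would require X's LID to fall under 'bread-roll':
print(satisfies("broetchen", "bread-roll"))  # True
print(satisfies("kuchen", "bread-roll"))     # False
\end{verbatim}
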
%\medskip%
Concerning syntactic variation, the nominal domain has not yet received the attention it might
deserve. There is a well-known variation with respect to the marking of possession within idioms.
This has been documented for \ili{English}\il{English} in \citet{Ho:15}, for Modern
\ili{Hebrew}\il{Hebrew} in \citet{Almog:12}, and for Modern \ili{Greek} and \ili{German}\il{German}
in \citet{Markantonatou:Sailer:16}. In \ili{German}, we find a relatively free alternation between
a plain definite and a possessive; see \refer{ex-verstand}. This is, however, not possible with all
idioms; see \refer{ex-frieden}.
\eal \label{ex-verstand-herz}
\ex
\gll Alex hat den / seinen Verstand verloren.\\
Alex has the {} his mind lost\\
\glt `Alex lost his mind.'\label{ex-verstand}
\ex
\gll Alex hat *den / ihren Frieden mit der Situation gemacht.\\
Alex has \hphantom{*}the {} her peace with the situation made\\
\glt `Alex made her peace with the situation.'\label{ex-frieden}
\zl
We can also find a free dative in some cases, expressing the possessor. In \refer{ex-herz}, a
dative possessor may co-occur with a plain definite or a coreferential possessive determiner; in
\refer{ex-augen}, only the definite article but not the possessive determiner is possible.
\eal
\label{ex-herz-augen}
\ex
\gll Alex hat mir das / mein Herz gebrochen.\\
Alex has me.\textsc{dat} the {} my heart broken\\
\glt `Alex broke my heart.'\label{ex-herz}
\ex
\gll Alex sollte mir lieber aus den / *meinen Augen gehen.\\
Alex should me.\textsc{dat} rather {out of} the {} \hphantom{*}my eyes go\\
\glt `Alex should rather disappear from my sight.'\label{ex-augen}
\zl
\noindent
While they do not offer a formal encoding, \citet{Markantonatou:Sailer:16} observe that a particular
encoding of possession in idioms is only possible if it would also be possible in a free
combination. However, an idiom may be idiosyncratically restricted to a subset of the realizations
that would be possible in a corresponding free combination. A formalization in HPSG might consist of
a treatment of possessively used definite determiners, combined with an analysis of free datives as
an extension of a verb's argument structure.%
\footnote{See \citew{Koenig1999b} for an analysis of possessively interpreted definites and
\citew[68]{MuellerLFGphrasal} for an extension of the argument structure as suggested in the main
text.
}
Related to the question of lexical variation are \emph{phraseological
patterns}\is{phraseological pattern}, i.e., very schematic idioms in which the lexical material is largely free. Some
examples of phraseological patterns are the \emph{Incredulity Response Construction} as in
\emph{What, me worry?} \citep{Akmajian:84,Lambrecht:90}, or the \emph{What's X doing Y?}
construction \citep{KF99a}. Such patterns are of theoretical importance as they typically involve a
non"=canonical syntactic pattern. The different locality and non"=locality hypotheses introduced
above make different predictions. \citet{FKoC88a} have presented such constructions as a motivation
for the non"=locality of constructions, i.e., as support of a SNH. However, \citet{KF99a} show that
a lexical analysis might be possible for some cases at least, which they illustrate with the
\emph{What's X doing Y?} construction.
\citet{Borsley2004a} looks at another phraseological pattern, the \emph{the X-er the Y-er}
construction, or \emph{Comparative Correlative Construction}\is{comparative correlative} -- see
\crossrefchaptert[Section~\ref{coord:sec-comparative-correlatives}]{coordination} and
\crossrefchapterw[\pageref{udc:page-correlatives-start}--\pageref{udc:page-correlatives-end}]{udc}.
Borsley analyzes this construction by means of two special (local) phrase structure types: one for
the comparative \emph{the}-clauses, and one for the overall construction. He shows that (i), the
idiosyncrasy of the construction concerns two levels of embedding and is, therefore, non"=local;
however, (ii), a local analysis is still possible. This approach raises the question of whether the
WNH is empirically vacuous since we can always encode a non"=local construction in terms of a series
of idiosyncratic local constructions. Clearly, work on more phraseological patterns is needed to
assess the various analytical options and their consequences for the architecture of grammar.
A major challenge for the conceptual and semantic analysis of idioms is the interaction between the
literal and the idiomatic meaning. I presented the basic empirical facts in
Section~\ref{Sec-EmpiricalDomain}. All HPSG approaches to idioms so far basically ignore the
literal meaning. This position might be justified, as an HPSG grammar should just model the
structure and meaning of an utterance and need not worry about the meta-linguistic relations among
different lexical items or among different readings of the same (or a homophonous) expression.
Nonetheless, this issue touches on an important conceptual point. Addressing it might immediately
provide possibilities to connect HPSG research to other disciplines and/""or frameworks like
Cognitive Linguistics, such as in \citet{Dobrovolskij:Piirainen:05}, and psycholinguistics.
\subsection{Challenges from other languages}
\label{Sec-OtherLanguages}
The majority of work on idioms in HPSG has been done on \ili{English} and \ili{German}.
%This led to a limitation of the possible phenomena that can be studied on idioms.
As discussed in Section~\ref{Sec-RecentLexical}, the recent trend in HPSG idiom research necessitates a detailed study of individual syntactic structures.
Consequently, the restriction to two closely related languages limits the possible phenomena that can be studied concerning idioms.
It would be essential to expand the empirical coverage of idiom analyses in HPSG to as many different languages as possible.
The larger degree of syntactic flexibility of \ili{French}, \ili{German}, and \ili{Dutch} idioms \citep{Ruwet:91,NSW94a,Schenk:95} has led to important refinements of the analysis in \citet{NSW94a} and, ultimately, to the lexical analyses of all syntactically regular idioms.
Similarly, the above-mentioned data on possessive alternations only become prominent when languages beyond \ili{English} are taken into account. Modern \ili{Greek}\il{Greek}, \ili{German}\il{German}, and many others
%mentioned above \refer{ex-herz-augen}
show the type of external possessor classified as a European areal phenomenon in \citet{Haspelmath:99}.
It would be important to look at idioms in languages with other types of external possessors.
In a recent paper, \citet{Sheinfux:al:19} provide data from Modern \ili{Hebrew}\il{Hebrew} that show that opacity and figurativity of an idiom are decisive for its syntactic flexibility, rather than decomposability.
This result stresses the importance of the literal reading for an adequate account of the syntactic behavior of idioms.
%
It shows that the inclusion of other languages can cause a shift of focus to other types of idioms or other types of idiomaticity.
To add just one more example, HPSG(-related) work on \ili{Persian} such as \citet{MuellerPersian}
and \citet{Samvelian:Faghiri:16} establishes a clear connection between complex predicates and
idioms. Their insights might also lead to a reconsideration of the similarities between light verbs
and idioms, as already set out in \citet{KE94a}.
As far as I can see, the following empirical phenomena have not been addressed in HPSG approaches to
idioms, as they do not occur in the main object languages for which we have idiom analyses, i.e.,
\ili{English} and \ili{German}. They are, however, common in other languages: the occurrence of
clitics\is{clitic} in idioms (found in \ili{Romance}\il{Romance} and \ili{Greek}\il{Greek});
aspectual\is{aspect} alternations in verbs (\ili{Slavic}\il{Slavic} and \ili{Greek}); argument
alternations other than passive and dative alternation, such as anti-passive\is{anti-passive},
causative\is{causative}, inchoative\is{inchoative}, etc.\ (in part found in \ili{Hebrew} and
addressed in \citealt{Sheinfux:al:19}); and displacement of idiom parts into special syntactic
positions (focus position in \ili{Hungarian}).
Finally, so far, idioms have usually been considered as either offering irregular structures or as
being more restricted in their structures than free combinations. In some languages, however, we
find archaic syntactic structures and function words in idioms that do not easily fit these two
analytic options. To name just a few, \citet{Lodrup:09} argues that \ili{Norwegian}\il{Norwegian}
used to have an external possessor construction similar to that of other European languages, which
is only conserved in some idioms. Similarly, \ili{Dutch}\il{Dutch} has a number of archaic case
inflections in multiword expressions \citep[\page 129]{Kuiper:18}, and there are archaic forms in
Modern \ili{Greek}\il{Greek} multiword expressions. It is far from clear what the best way would be
to integrate such cases into an HPSG grammar.
\section{Conclusion}
\label{Sec-Summary}
Idioms are among the topics in linguistics for which HPSG-related publications have had a clear
impact on the field and have been widely quoted across frameworks. This handbook article aimed to
provide an overview of the development of idiom analyses in HPSG. There seems to be a
development towards ever more lexical analyses, from the holistic approach to all idioms
in Chomsky's work to a lexical account of all syntactically regular expressions. Notwithstanding
the advantages of the lexical analyses, I consider it a basic problem of such approaches that the
unit status of idioms is lost. Consequently, I think that the right balance between phrasal and
lexical aspects in the analysis of idioms has not yet been fully achieved.
The sign-based character of HPSG seems to be particularly suited for a theory of idioms as it allows
one to take into consideration syntactic, semantic, and pragmatic aspects and to use them to
constrain the occurrence of idioms appropriately.
% moved abbrevs and ackno before appendix to fill the page
\section*{Abbreviations}
\begin{tabular}{@{}ll}
GPSG & Generalized Phrase Structure Grammar \citep{GKPS85a}\\
%MRS & Minimal Recursion Semantics \citep{CFPS2005a}\\
%MWE & multiword expression\\
MGG & Mainstream Generative Grammar\\
SLH & Strong Locality Hypothesis\\%, see page \pageref{slh}\\
SNH & Strong Non"=locality Hypothesis\\%, see page \pageref{snh}\\
WLH & Weak Locality Hypothesis\\%, see page \pageref{wlh}\\
WNH & Weak Non"=locality Hypothesis\\%, see page \pageref{wnh}\\
\end{tabular}
\section*{\acknowledgmentsUS}
I have perceived Ivan A.\@ Sag\ia{Sag, Ivan A.} and his work with various colleagues as a major inspiration for my own work on idioms and multiword expressions.
This is clearly reflected in the structure of this paper, too.
I apologize for this bias, but I think it is legitimate within an HPSG handbook.
%
I am grateful to Jean-Pierre Koenig, Stefan Müller and Tom Wasow for comments on the outline and the first version of this chapter.
%
I would not have been able to format this chapter without the support of the Language Science Press team, in particular Sebastian Nordhoff.
%
I would like to thank Elizabeth Pankratz for comments and proofreading.
%\appendix
\section*{Appendix: List of used idioms}
\subsection*{English}
\begin{tabular}{@{}lL{3.8cm}L{3.8cm}@{}}
idiom & paraphrase & comment\\\hline
break the ice & relieve tension in a strained situation & figurative\\
brush one's teeth & clean one's teeth with a toothbrush & collocation, no idiomaticity\\
give s.o.\ the creeps & make s.o.\ feel uncomfortable & systematic lexical variation\\
Good morning! & (morning greeting) & formulaic expression\\
immaculate performance & perfect performance & statistical idiomaticity\\
in a trice & in a moment & bound word: \emph{trice}\\
kick the bucket & die & non"=decomposable\\
make headway & make progress & bound word: \emph{headway}\\
pull strings & exert influence/""use one's connections & flexible\\
saw logs & snore & {transparent, non"=decomposable, semi"=flexible}\\
shake hands & greet & body-part expression\\
shake one's head & decline/""negate & {body-part expression, possessive idiom}\\
shit hit the fan & there is trouble & {subject as idiom component, transparent/""figurative, non"=decomposable}\\
shoot the breeze & chat & non"=decomposable\\
spill the beans & reveal a secret & flexible\\
take a shower & clean oneself using a shower & collocation, light verb construction\\
take the bull by the horns & {approach a problem directly} & figurative expression\\
take umbrage & take offense & bound word: \emph{umbrage}\\
trip the light fantastic & dance & syntactically irregular\\
\end{tabular}
\subsection*{German}
\begin{sideways}
\begin{tabular}{@{}L{4cm}lL{4.0cm}L{4.0cm}}
idiom & gloss & translation & comment\\\hline
%
{den/seinen Verstand verlieren}
& {the/""one's mind lose}
& {lose one's mind}
& {alternation of possessor marking}
\\
%
{jdm.\ das Herz brechen} & s.o.\ the heart break & break s.o.'s heart
& {dative possessor and possessor alternation}\\
%
{jdm.\ aus den Augen gehen} & s.o.\ out of the eyes go
& {disappear from s.o.'s sight} &
{dative possessor, restricted possessor alternation}\\
%
{seinen Frieden machen mit}
& {one's peace make with}
& {make one's peace with}
& {no possessor alternation possible}\\
%
{wie warme Semmeln/""Brötchen/""Schrippen weggehen}
& {like warm rolls vanish} & sell like hotcakes &
{parts can be exchanged by synonyms}\\
\end{tabular}
\end{sideways}
\is{idiom|)}
{\sloppy
\printbibliography[heading=subbibliography,notkeyword=this] }
\end{document}
% <!-- Local IspellDict: en_US-w_accents -->
| {
"alphanum_fraction": 0.7821599453,
"avg_line_length": 63.3882149047,
"ext": "tex",
"hexsha": "d67aff22503070d8c8affef057503434eeb4a949",
"lang": "TeX",
"max_forks_count": 9,
"max_forks_repo_forks_event_max_datetime": "2021-01-14T10:35:42.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-03-20T20:05:04.000Z",
"max_forks_repo_head_hexsha": "2b7aa5d3301f5b18061c9f76af311e3bebccca9a",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "langsci/259",
"max_forks_repo_path": "chapters/idioms.tex",
"max_issues_count": 177,
"max_issues_repo_head_hexsha": "2b7aa5d3301f5b18061c9f76af311e3bebccca9a",
"max_issues_repo_issues_event_max_datetime": "2022-03-29T15:28:50.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-01-29T10:48:51.000Z",
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "langsci/259",
"max_issues_repo_path": "chapters/idioms.tex",
"max_line_length": 659,
"max_stars_count": 16,
"max_stars_repo_head_hexsha": "50bbaaf0bd7b3f8779d4e7d3685bd4a0020dfa12",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "langsci/hpsg-handbook",
"max_stars_repo_path": "chapters/idioms.tex",
"max_stars_repo_stars_event_max_datetime": "2021-01-05T11:42:54.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-03-20T08:06:05.000Z",
"num_tokens": 19228,
"size": 73150
} |
\chapter{Hamiltonian and Observables}
\label{chap:hamiltobs}
\qmcpack is capable of the simultaneous measurement of the Hamiltonian and many other quantum operators. The Hamiltonian attains a special status among the available operators (also referred to as observables) because it ultimately generates all available information regarding the quantum system. This is evident from an algorithmic standpoint as well since the Hamiltonian (embodied in the projector) generates the imaginary time dynamics of the walkers in DMC and reptation Monte Carlo (RMC).
This section covers how the Hamiltonian can be specified, component by component, by the user in the XML format native to \qmcpack. It also covers the input structure of statistical estimators corresponding to quantum observables such as the density, static structure factor, and forces.
\section{The Hamiltonian}
The many-body Hamiltonian in Hartree units is given by
\begin{align}
\hat{H} = -\sum_i\frac{1}{2m_i}\nabla_i^2 + \sum_iv^{ext}(r_i) + \sum_{i<j}v^{qq}(r_i,r_j) + \sum_{i\ell}v^{qc}(r_i,r_\ell) + \sum_{\ell<m}v^{cc}(r_\ell,r_m)\:.
\end{align}
Here, the sums indexed by $i/j$ are over quantum particles, while $\ell/m$ are reserved for classical particles. Often the quantum particles are electrons, and the classical particles are ions, though \qmcpack is not limited in this way. The mass of each quantum particle is denoted $m_i$, $v^{qq}/v^{qc}/v^{cc}$ are pair potentials between quantum-quantum/quantum-classical/classical-classical particles, and $v^{ext}$ denotes a purely external potential.
\qmcpack is designed modularly so that any potential can be supported with minimal additions to the code base. Potentials currently supported include Coulomb interactions in open and periodic boundary conditions, the MPC potential, nonlocal pseudopotentials, helium pair potentials, and various model potentials such as hard sphere, Gaussian, and modified P\"oschl--Teller.
Reference information and examples for the \texttt{<hamiltonian/>} XML element are provided subsequently. Detailed descriptions of the input for individual potentials are given in the sections that follow.
% hamiltonian element
% dev notes
% Hamiltonian element read
% HamiltonianPool::put
% reads attributes: id name role target
% id/name is passed to QMCHamiltonian
% role selects the primary hamiltonian
% target associates to quantum particleset
% HamiltonianFactory::build
% reads attributes: type source default
% HamiltonianFactory cloning may be flawed for non-electron systems
% see HamiltonianFactory::clone
% aCopy->renameProperty(``e'',qp->getName());
% aCopy->renameProperty(psiName,psi->getName());
% the renaming may not work if dynamic particleset.name!=''e''
% lots of xml inputs are simply ignored if do not explicitly match (fix! here and elsewhere in the build tree)
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{hamiltonian} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{simulation, qmcsystem}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\texttt{pairpot extpot estimator constant}(deprecated)}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{name/id}$^o$ & text & \textit{anything}& h0 & Unique id for this Hamiltonian instance \\
& \texttt{type}$^o$ & text & & generic & \textit{No current function} \\
& \texttt{role}$^o$ & text & primary/extra & extra & Designate as primary Hamiltonian or not \\
& \texttt{source}$^o$ & text & \texttt{particleset.name} & i & Identify classical \texttt{particleset} \\
& \texttt{target}$^o$ & text & \texttt{particleset.name} & e & Identify quantum \texttt{particleset} \\
& \texttt{default}$^o$ & boolean & yes/no & yes & Include kinetic energy term implicitly \\
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
Additional information:
\begin{itemize}
\item{\textbf{target:} Must be set to the name of the quantum \texttt{particleset}. The default value is typically sufficient. In normal usage, no other attributes are provided.}
\end{itemize}
% All-electron hamiltonian element
\begin{lstlisting}[style=QMCPXML,caption=All electron Hamiltonian XML element.]
<hamiltonian target="e">
<pairpot name="ElecElec" type="coulomb" source="e" target="e"/>
<pairpot name="ElecIon" type="coulomb" source="i" target="e"/>
<pairpot name="IonIon" type="coulomb" source="i" target="i"/>
</hamiltonian>
\end{lstlisting}
% Pseudopotential hamiltonian element
\begin{lstlisting}[style=QMCPXML,caption=Pseudopotential Hamiltonian XML element.]
<hamiltonian target="e">
<pairpot name="ElecElec" type="coulomb" source="e" target="e"/>
<pairpot name="PseudoPot" type="pseudo" source="i" wavefunction="psi0" format="xml">
<pseudo elementType="Li" href="Li.xml"/>
<pseudo elementType="H" href="H.xml"/>
</pairpot>
<pairpot name="IonIon" type="coulomb" source="i" target="i"/>
</hamiltonian>
\end{lstlisting}
\section{Pair potentials}
Many pair potentials are supported. Though only the most commonly used pair potentials are covered in detail in this section, all currently available potentials are listed subsequently. If a potential you desire is not listed, or is not present at all, feel free to contact the developers.
% pairpot element
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{pairpot} factory element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{hamiltonian}}\\
\multicolumn{2}{l}{type selector:} & \multicolumn{4}{l}{\texttt{type} attribute}\\
\multicolumn{2}{l}{type options: } & \multicolumn{2}{l}{coulomb } & \multicolumn{2}{l}{Coulomb/Ewald potential}\\
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{pseudo } & \multicolumn{2}{l}{Semilocal pseudopotential}\\
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{mpc } & \multicolumn{2}{l}{Model periodic Coulomb interaction/correction}\\
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{cpp } & \multicolumn{2}{l}{Core polarization potential}\\
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{skpot } & \multicolumn{2}{l}{\textit{Unknown}}\\
\multicolumn{2}{l}{shared attributes:} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{type}$^r$ & text & \textit{See above} & 0 & Select pairpot type \\
& \texttt{name}$^r$ & text & \textit{anything} & any & Unique name for this pairpot\\
& \texttt{source}$^r$ & text & \texttt{particleset.name} &\texttt{hamiltonian.target}& Identify interacting particles\\
& \texttt{target}$^r$ & text & \texttt{particleset.name} &\texttt{hamiltonian.target}& Identify interacting particles \\
& \texttt{units}$^o$ & text & & hartree & \textit{No current function} \\
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
Additional information:
\begin{itemize}
\item{\textbf{type:} Used to select the desired pair potential. Must be selected from the list of type options.}
\item{\textbf{name:} A unique name used to identify this pair potential. Block averaged output data will appear under this name in \texttt{scalar.dat} and/or \texttt{stat.h5} files.}
\item{\textbf{source/target:} These specify the particles involved in a pair interaction. If an interaction is between classical (e.g., ions) and quantum (e.g., electrons), \texttt{source}/\texttt{target} should be the name of the classical/quantum \texttt{particleset}.}
\item{Only \texttt{coulomb}, \texttt{pseudo}, and \texttt{mpc} are described in detail in the following subsections. The older or less-used types (\texttt{cpp}, \texttt{skpot}) are not covered.}
\dev{
\item{Available only if \texttt{QMC\_CUDA} is not defined: \texttt{skpot}.}
\item{Available only if \texttt{OHMMS\_DIM==3}: \texttt{mpc, vhxc, pseudo}.}
\item{Available only if \texttt{OHMMS\_DIM==3} and \texttt{QMC\_CUDA} is not defined: \texttt{cpp}.}
}
\end{itemize}
% physical read by coulomb potentials
% potential is only for pressure estimator
% pairpot instances
% do coulomb, pseudo, mpc
\subsection{Coulomb potentials}
The bare Coulomb potential is used in open boundary conditions:
\begin{align}
V_c^{open} = \sum_{i<j}\frac{q_iq_j}{\abs{r_i-r_j}}\:.
\end{align}
When periodic boundary conditions are selected, Ewald summation is used automatically:
\begin{align}\label{eq:ewald}
V_c^{pbc} = \sum_{i<j}\frac{q_iq_j}{\abs{r_i-r_j}} + \frac{1}{2}\sum_{L\ne0}\sum_{i,j}\frac{q_iq_j}{\abs{r_i-r_j+L}}\:.
\end{align}
The sum indexed by $L$ is over all nonzero simulation cell lattice vectors. In practice, the Ewald sum is broken into short- and long-range parts in a manner optimized for efficiency (see Ref.~\cite{Natoli1995} for details).
For information on how to set the boundary conditions, consult Section~\ref{chap:simulationcell}.
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{pairpot type=coulomb} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{hamiltonian}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\textit{None}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{type}$^r$ & text & \textbf{coulomb} & & Must be coulomb \\
& \texttt{name/id}$^r$ & text & \textit{anything}& ElecElec & Unique name for interaction\\
& \texttt{source}$^r$ & text & \texttt{particleset.name} &\texttt{hamiltonian.target}& Identify interacting particles\\
& \texttt{target}$^r$ & text & \texttt{particleset.name} &\texttt{hamiltonian.target}& Identify interacting particles\\
& \texttt{pbc}$^o$ & boolean & yes/no & yes & Use Ewald summation \\
& \texttt{physical}$^o$& boolean & yes/no & yes & Hamiltonian(yes)/observable(no) \\
\dev{& \texttt{forces} & boolean & yes/no & no & \textit{Deprecated} \\ }
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
Additional information:
\begin{itemize}
\item{\textbf{type/source/target:} See description for the previous generic \texttt{pairpot} factory element.}
\item{\textbf{name:} Traditional user-specified names for electron-electron, electron-ion, and ion-ion terms are \texttt{ElecElec}, \texttt{ElecIon}, and \texttt{IonIon}, respectively. Although any choice can be used, the data analysis tools expect to find columns in \texttt{*.scalar.dat} with these names.}
\item{\textbf{pbc}: Ewald summation will not be performed if \texttt{simulationcell.bconds== n n n}, regardless of the value of \texttt{pbc}. Similarly, the \texttt{pbc} attribute can only be used to turn off Ewald summation if \texttt{simulationcell.bconds!= n n n}. The default value is recommended.}
\item{\textbf{physical}: If \texttt{physical==yes}, this pair potential is included in the Hamiltonian and will factor into the \texttt{LocalEnergy} reported by QMCPACK and also into the DMC branching weight. If \texttt{physical==no}, the pair potential is treated as a passive observable but not as part of the Hamiltonian itself, so it does not contribute to the reported \texttt{LocalEnergy}. Regardless of the value of \texttt{physical}, output data will appear in \texttt{scalar.dat} in a column headed by \texttt{name} (see the final example below).}
\end{itemize}
\begin{lstlisting}[style=QMCPXML,caption=QMCPXML element for Coulomb interaction between electrons.]
<pairpot name="ElecElec" type="coulomb" source="e" target="e"/>
\end{lstlisting}
\begin{lstlisting}[style=QMCPXML,caption=QMCPXML element for Coulomb interaction between electrons and ions (all-electron only).]
<pairpot name="ElecIon" type="coulomb" source="i" target="e"/>
\end{lstlisting}
\begin{lstlisting}[style=QMCPXML,caption=QMCPXML element for Coulomb interaction between ions.]
<pairpot name="IonIon" type="coulomb" source="i" target="i"/>
\end{lstlisting}
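As an illustrative sketch of the \texttt{physical} attribute (the element name \texttt{ElecIonObs} is arbitrary and not taken from any shipped example), a Coulomb term can be accumulated as a passive observable only:
\begin{lstlisting}[style=QMCPXML,caption=Sketch of a Coulomb interaction accumulated as an observable only.]
  <!-- measured and reported under the given name, but excluded from LocalEnergy -->
  <pairpot name="ElecIonObs" type="coulomb" source="i" target="e" physical="no"/>
\end{lstlisting}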
\subsection{Pseudopotentials}
\label{sec:nlpp}
\qmcpack supports pseudopotentials in semilocal form, which is local in the radial coordinate and nonlocal in angular coordinates. When all angular momentum channels above a certain threshold ($\ell_{max}$) are well approximated by the same potential ($V_{\bar{\ell}}\equiv V_{loc}$), the pseudopotential separates into a fully local channel and an angularly nonlocal component:
\begin{align}
V^{PP} = \sum_{ij}\Big(V_{\bar{\ell}}(\abs{r_i-\tilde{r}_j}) + \sum_{\ell\ne\bar{\ell}}^{\ell_{max}}\sum_{m=-\ell}^\ell \operator{Y_{\ell m}}{\big[V_\ell(\abs{r_i-\tilde{r}_j}) - V_{\bar{\ell}}(\abs{r_i-\tilde{r}_j}) \big]}{Y_{\ell m}} \Big)\:.
\end{align}
Here the electron/ion index is $i/j$, and only one type of ion is shown for simplicity.
Evaluation of the localized pseudopotential energy $\Psi_T^{-1}V^{PP}\Psi_T$ requires additional angular integrals. These integrals are evaluated on a randomly shifted angular grid. The size of this grid is determined by $\ell_{max}$. See Ref.~\cite{Mitas1991} for further detail.
\qmcpack uses the FSAtom pseudopotential file format associated with the ``Free Software Project for Atomic-scale Simulations'' initiated in 2002. See \url{http://www.tddft.org/fsatom/manifest.php} for more information. The FSAtom format uses XML for structured data. Files in this format carry no format-specific extension and are conventionally given the generic ``\texttt{.xml}'' suffix. The tabular data format of CASINO is also supported.
% FSAtom format links
% unfortunately none of the surviving links detail the format itself
% http://www.tddft.org/fsatom/index.php
% http://www.tddft.org/fsatom/programs.php
% http://www.tddft.org/fsatom/manifest.php
% http://163.13.111.58/cchu/reference/web/PseudoPotentials%20-%20FSAtom%20Wiki.htm
% http://departments.icmab.es/leem/alberto/xml/pseudo/index.html
% pseudopotential element
% dev notes
% attributes name, source, wavefunction, format are read in CoulombFactory.cpp HamiltonianFactory::addPseudoPotential
% format==''old'' refers to an old table format that is no longer supported
% read continues in ECPotentialBuilder::put()
% if format!=xml/old (i.e. table) qmcpack will attempt to read from *.psf files
% in this case, <pairpot type=''pseudo'' format=''table''/>, ie there are no elements
% if particlset groups are Li H (in order), then it looks for Li.psf and H.psf
% what is the psf format?
% if format==xml, normal read continues, i.e. <pseudo/> child elements are expected
% read is not sensitive to particleset group/species ordering
% child elements not named <pseudo/> are simply ignored (FIX!)
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{pairpot type=pseudo} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{hamiltonian}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\texttt{pseudo}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{type}$^r$ & text & \textbf{pseudo} & & Must be pseudo \\
& \texttt{name/id}$^r$ & text & \textit{anything}& PseudoPot & \textit{No current function}\\
& \texttt{source}$^r$ & text & \texttt{particleset.name} & i & Ion \texttt{particleset} name\\
& \texttt{target}$^r$ & text & \texttt{particleset.name} &\texttt{hamiltonian.target}& Electron \texttt{particleset} name \\
& \texttt{pbc}$^o$ & boolean & yes/no & yes$^*$ & Use Ewald summation \\
& \texttt{forces} & boolean & yes/no & no & \textit{Deprecated} \\
&\texttt{wavefunction}$^r$ & text & \texttt{wavefunction.name}& invalid & Identify wavefunction \\
& \texttt{format}$^r$ & text & xml/table & table & Select file format \\
& \texttt{algorithm}$^o$ & text & batched/default & default & Choose NLPP algorithm \\
& \texttt{DLA}$^o$ & text & yes/no & no & Use determinant localization approximation \\
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
Additional information:
\begin{itemize}
\item{\textbf{type/source/target:} See description for the generic \texttt{pairpot} factory element.}
\item{\textbf{name:} Ignored. Instead, default names will be present in \texttt{*scalar.dat} output files when pseudopotentials are used. The field \texttt{LocalECP} refers to the local part of the pseudopotential. If nonlocal channels are present, a \texttt{NonLocalECP} field will be added that contains the nonlocal energy summed over all angular momentum channels.}
\item{\textbf{pbc:} Ewald summation will not be performed if \texttt{simulationcell.bconds== n n n}, regardless of the value of \texttt{pbc}. Similarly, the \texttt{pbc} attribute can only be used to turn off Ewald summation if \texttt{simulationcell.bconds!= n n n}.}
\item{\textbf{format:} If \texttt{format}==table, QMCPACK looks for \texttt{*.psf} files containing pseudopotential data in a tabular format. The files must be named after the ionic species provided in \texttt{particleset} (e.g., \texttt{Li.psf} and \texttt{H.psf}). If \texttt{format}==xml, additional \texttt{pseudo} child XML elements must be provided (see the following). These elements specify individual file names and formats (both the FSAtom XML and CASINO tabular data formats are supported). }
\item{\textbf{algorithm:} The default algorithm evaluates the ratios of wavefunction components together for each quadrature point and then one point after another. The batched algorithm evaluates the ratios of quadrature points together for each wavefunction component and then one component after another. Internally, it uses \texttt{VirtualParticleSet} for quadrature points. The hybrid orbital representation has an extra optimization enabled when using the batched algorithm.}
\item{\textbf{DLA:} The determinant localization approximation (DLA)~\cite{Zen2019DLA} uses only the fermionic part of the wavefunction when calculating the NLPP energy. An example using the \texttt{algorithm} and \texttt{DLA} attributes is given below.}
\end{itemize}
\begin{lstlisting}[style=QMCPXML,caption=QMCPXML element for pseudopotential electron-ion interaction (psf files).]
<pairpot name="PseudoPot" type="pseudo" source="i" wavefunction="psi0" format="psf"/>
\end{lstlisting}
\begin{lstlisting}[style=QMCPXML,caption=QMCPXML element for pseudopotential electron-ion interaction (xml files).]
<pairpot name="PseudoPot" type="pseudo" source="i" wavefunction="psi0" format="xml">
<pseudo elementType="Li" href="Li.xml"/>
<pseudo elementType="H" href="H.xml"/>
</pairpot>
\end{lstlisting}
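The following sketch (not a distributed example) shows the optional \texttt{algorithm} and \texttt{DLA} attributes from the preceding table added to the same pseudopotential element:
\begin{lstlisting}[style=QMCPXML,caption=Sketch of a pseudopotential element using the batched NLPP algorithm and DLA.]
  <pairpot name="PseudoPot" type="pseudo" source="i" wavefunction="psi0" format="xml"
           algorithm="batched" DLA="yes">
    <pseudo elementType="Li" href="Li.xml"/>
    <pseudo elementType="H" href="H.xml"/>
  </pairpot>
\end{lstlisting}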
%\begin{lstlisting}[caption=QMCPXML element for pseudopotential electron-ion interaction (CASINO files).]
% <pairpot name="PseudoPot" type="pseudo" source="i" wavefunction="psi0" format="xml">
% <pseudo elementType="Li" href="Li.data"/>
% <pseudo elementType="H" href="H.data"/>
% </pairpot>
%\end{lstlisting}
Details of \texttt{<pseudo/>} input elements are shown in the following. It is possible to include (or construct) a full pseudopotential directly in the input file without providing an external file via \texttt{href}. The full XML format for pseudopotentials is not yet covered.
% pseudo element
% dev notes
% initial read of href elementType/symbol attributes at ECPotentialBuilder::useXmlFormat()
% read continues in ECPComponentBuilder
% format==xml and href==none (not provided) => ECPComponentBuilder::put(cur)
% format==xml and href==a file => ECPComponentBuilder::parse(href,cur)
% format==casino => ECPComponentBuilder::parseCasino(href,cur)
% this reader is tucked away in ECPComponentBuilder.2.cpp
% nice demonstration of OhmmsAsciiParser here
% maximum cutoff defined by a 1.e-5 (Ha?) spread in the nonlocal potentials
% quadrature rules (1-7) set as in J. Chem. Phys. 95 (3467) (1991), see below
% Rule # points lexact
% 1 1 0
% 2 4 2
% 3 6 3
% 4 12 5
% 5 18 5
% 6 26 7
% 7 50 11
% looks like channels only go from s-g (see ECPComponentBuilder constructor)
% perhaps not, quadrature rules really do go up to 7 (lexact==11), see SetQuadratureRule()
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{pseudo} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{pairpot type=pseudo}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\texttt{header local grid}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{elementType/symbol}$^r$& text &\texttt{group.name}& none & Identify ionic species \\
& \texttt{href}$^r$ & text & \textit{filepath}& none & Pseudopotential file path\\
& \texttt{format}$^r$ & text & xml/casino & xml & Specify file format\\
& \texttt{cutoff}$^o$ & real & & & Nonlocal cutoff radius \\
& \texttt{lmax}$^o$ & integer & & & Largest angular momentum \\
& \texttt{nrule}$^o$ & integer & & & Integration grid order \\
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
\begin{lstlisting}[style=QMCPXML,caption=QMCPXML element for pseudopotential of single ionic species.]
<pseudo elementType="Li" href="Li.xml"/>
\end{lstlisting}
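The optional \texttt{cutoff}, \texttt{lmax}, and \texttt{nrule} attributes from the preceding table can also be supplied directly; the numerical values in this sketch are placeholders for illustration only and are not recommendations:
\begin{lstlisting}[style=QMCPXML,caption=Sketch of a pseudo element with optional attributes supplied explicitly.]
  <pseudo elementType="Li" href="Li.xml" cutoff="1.8" lmax="2" nrule="4"/>
\end{lstlisting}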
\subsection{MPC Interaction/correction}
The MPC interaction is an alternative to direct Ewald summation. The MPC corrects the exchange-correlation hole to more closely match its thermodynamic limit. Because of this, the MPC exhibits smaller finite-size errors than the bare Ewald interaction, although several alternative and competitive finite-size correction schemes now exist. The MPC is most often used simply as a finite-size correction in post-processing (set \texttt{physical="no"} in the input; see the examples at the end of this subsection).
% mpc element
% dev notes
% most attributes are read in CoulombPotentialFactory.cpp HamiltonianFactory::addMPCPotential()
% user input for the name attribute is ignored and the name is always MPC
% density G-vectors are stored in ParticleSet: Density_G and DensityReducedGvecs members
% check the Linear Extrap and Quadratic Extrap output in some real examples (see MPC::init_f_G())
% what are acceptable values for the discrepancies?
% check that these decrease as cutoff is increased
% commented out code for MPC.dat creation in MPC::initBreakup()
% short range part is 1/r, MPC::evalSR()
% long range part is on a spline (VlongSpline), MPC::evalLR()
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{pairpot type=mpc} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{hamiltonian}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\textit{None}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{type}$^r$ & text & \textbf{mpc} & & Must be mpc \\
& \texttt{name/id}$^r$ & text & \textit{anything}& MPC & Unique name for interaction \\
& \texttt{source}$^r$ & text & \texttt{particleset.name} &\texttt{hamiltonian.target}& Identify interacting particles\\
& \texttt{target}$^r$ & text & \texttt{particleset.name} &\texttt{hamiltonian.target}& Identify interacting particles \\
& \texttt{physical}$^o$& boolean & yes/no & no & Hamiltonian(yes)/observable(no) \\
& \texttt{cutoff} & real & $>0$ & 30.0 & Kinetic energy cutoff \\
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
Remarks
\begin{itemize}
\item{\texttt{physical}: Typically set to \texttt{no}, meaning the standard Ewald interaction will be used during sampling and MPC will be measured as an observable for finite-size post-correction. If \texttt{physical} is \texttt{yes}, the MPC interaction will be used during sampling. In this case an electron-electron Coulomb \texttt{pairpot} element should not be supplied.}
\item{\textbf{Developer note:} Currently the \texttt{name} attribute for the MPC interaction is ignored. The name is always reset to \texttt{MPC}.}
\end{itemize}
% MPC correction
\begin{lstlisting}[style=QMCPXML,caption=MPC for finite-size postcorrection.]
<pairpot type="MPC" name="MPC" source="e" target="e" ecut="60.0" physical="no"/>
\end{lstlisting}
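Because the MPC is usually accumulated with \texttt{physical="no"} while the standard Ewald interaction drives the sampling, a complete Hamiltonian might combine both as in the following sketch (particleset, wavefunction, and pseudopotential file names are reused from the earlier examples; the \texttt{ecut} value is a placeholder):
\begin{lstlisting}[style=QMCPXML,caption=Sketch of a Hamiltonian combining Ewald sampling with an MPC observable.]
<hamiltonian target="e">
  <pairpot name="ElecElec" type="coulomb" source="e" target="e"/>
  <pairpot name="PseudoPot" type="pseudo" source="i" wavefunction="psi0" format="xml">
    <pseudo elementType="Li" href="Li.xml"/>
    <pseudo elementType="H" href="H.xml"/>
  </pairpot>
  <pairpot name="IonIon" type="coulomb" source="i" target="i"/>
  <!-- MPC accumulated as an observable for finite-size postcorrection -->
  <pairpot type="MPC" name="MPC" source="e" target="e" ecut="60.0" physical="no"/>
</hamiltonian>
\end{lstlisting}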
% estimator element
\section{General estimators}
A broad range of estimators for physical observables is available in \qmcpack. The following sections contain input details for the total number density (\texttt{density}), number density resolved by particle spin (\texttt{spindensity}), spherically averaged pair correlation function (\texttt{gofr}), static structure factor (\texttt{sk}), static structure factor needed for finite-size corrections (\texttt{skall}), energy density (\texttt{energydensity}), one-body reduced density matrix (\texttt{dm1b}), $S(k)$-based kinetic energy correction (\texttt{chiesa}), forward walking (\texttt{ForwardWalking}), and force (\texttt{Force}) estimators. Other estimators are not yet covered.
When an \texttt{<estimator/>} element appears in \texttt{<hamiltonian/>}, it is evaluated for all applicable chained QMC runs ({e.g.,} VMC$\rightarrow$DMC$\rightarrow$DMC). Estimators are generally not accumulated during wavefunction optimization sections. If an \texttt{<estimator/>} element is instead provided in a particular \texttt{<qmc/>} element, that estimator is only evaluated for that specific section ({e.g.,} during VMC only).
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{estimator} factory element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{hamiltonian, qmc}}\\
\multicolumn{2}{l}{type selector:} & \multicolumn{4}{l}{\texttt{type} attribute}\\
\multicolumn{2}{l}{type options: } & \multicolumn{2}{l}{density } & \multicolumn{2}{l}{Density on a grid}\\
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{spindensity } & \multicolumn{2}{l}{Spin density on a grid}\\
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{gofr } & \multicolumn{2}{l}{Pair correlation function (quantum species)}\\
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{sk } & \multicolumn{2}{l}{Static structure factor}\\
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{SkAll } & \multicolumn{2}{l}{Static structure factor needed for finite size correction}\\
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{structurefactor } & \multicolumn{2}{l}{Species resolved structure factor}\\
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{specieskinetic } & \multicolumn{2}{l}{Species resolved kinetic energy}\\
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{latticedeviation } & \multicolumn{2}{l}{Spatial deviation between two particlesets}\\
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{momentum } & \multicolumn{2}{l}{Momentum distribution}\\
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{energydensity } & \multicolumn{2}{l}{Energy density on uniform or Voronoi grid}\\
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{dm1b } & \multicolumn{2}{l}{One body density matrix in arbitrary basis}\\
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{chiesa } & \multicolumn{2}{l}{Chiesa-Ceperley-Martin-Holzmann kinetic energy correction}\\
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{Force } & \multicolumn{2}{l}{Family of ``force'' estimators (see~\ref{sec:force_est})}\\
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{ForwardWalking } & \multicolumn{2}{l}{Forward walking values for existing estimators}\\
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{orbitalimages } & \multicolumn{2}{l}{Create image files for orbitals, then exit}\\
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{flux } & \multicolumn{2}{l}{Checks sampling of kinetic energy}\\
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{localmoment } & \multicolumn{2}{l}{Atomic spin polarization within cutoff radius}\\
\dev{
\multicolumn{2}{l}{ } & \multicolumn{2}{l}{Pressure } & \multicolumn{2}{l}{\textit{No current function}}\\
\multicolumn{2}{l}{shared attributes:} & \multicolumn{4}{l}{}\\
}
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{type}$^r$ & text & \textit{See above} & 0 & Select estimator type \\
& \texttt{name}$^r$ & text & \textit{anything} & any & Unique name for this estimator\\
%& \texttt{source}$^r$ & text & \texttt{particleset.name} &\texttt{hamiltonian.target}& Identify interacting particles\\
%& \texttt{target}$^r$ & text & \texttt{particleset.name} &\texttt{hamiltonian.target}& Identify interacting particles \\
%& \texttt{units}$^o$ & text & & hartree & \textit{No current function} \\
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
% <estimator type="structurefactor" name="StructureFactor" report="yes"/>
% <estimator type="nofk" name="nofk" wavefunction="psi0"/>
%\dev{
%\FloatBarrier
%\begin{table}[h]
%\begin{center}
%\begin{tabularx}{\textwidth}{l l l l l X }
%\hline
%\multicolumn{6}{l}{\texttt{estimator type=X} element} \\
%\hline
%\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{hamiltonian, qmc}}\\
%\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\textit{None}}\\
%\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
% & \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
% & \texttt{type}$^r$ & text & \textbf{X} & & Must be X \\
% & \texttt{name}$^r$ & text & \textit{anything}& & Unique name for estimator \\
% & \texttt{source}$^o$ & text & \texttt{particleset.name} &\texttt{hamiltonian.target}& Identify particles\\
% & \texttt{target}$^o$ & text & \texttt{particleset.name} &\texttt{hamiltonian.target}& Identify particles \\
% \hline
%\end{tabularx}
%\end{center}
%\end{table}
%\FloatBarrier
%}
\subsection{Chiesa-Ceperley-Martin-Holzmann kinetic energy correction}
This estimator calculates a finite-size correction to the kinetic energy following the formalism laid out in Ref.~\cite{Chiesa2006}. The total energy can be corrected for finite-size effects by using this estimator in conjunction with the MPC correction.
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{estimator type=chiesa} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{hamiltonian, qmc}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\textit{None}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{type}$^r$ & text & \textbf{chiesa} & & Must be chiesa \\
& \texttt{name}$^o$ & text & \textit{anything} & KEcorr & Always reset to KEcorr \\
& \texttt{source}$^o$ & text & \texttt{particleset.name} & e & Identify quantum particles\\
& \texttt{psi}$^o$ & text & \texttt{wavefunction.name} & psi0 & Identify wavefunction \\
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
% kinetic energy correction
\begin{lstlisting}[style=QMCPXML,caption=``Chiesa'' kinetic energy finite-size postcorrection.]
<estimator name="KEcorr" type="chiesa" source="e" psi="psi0"/>
\end{lstlisting}
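A minimal sketch combining the two finite-size postcorrections discussed in this chapter, the MPC observable and the ``Chiesa'' kinetic energy correction, is shown below; it is assembled from the earlier examples rather than taken from a distributed input:
\begin{lstlisting}[style=QMCPXML,caption=Sketch combining the MPC observable and the Chiesa kinetic energy correction.]
<hamiltonian target="e">
  <!-- pair potentials as in the earlier examples -->
  <pairpot type="MPC" name="MPC" source="e" target="e" ecut="60.0" physical="no"/>
  <estimator name="KEcorr" type="chiesa" source="e" psi="psi0"/>
</hamiltonian>
\end{lstlisting}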
\subsection{Density estimator}
The particle number density operator is given by
\begin{align}
\hat{n}_r = \sum_i\delta(r-r_i)\:.
\end{align}
The \texttt{density} estimator accumulates the number density on a uniform histogram grid over the simulation cell. The value obtained for a grid cell $c$ with volume $\Omega_c$ is then the average number of particles in that cell:
\begin{align}
n_c = \int dR \abs{\Psi}^2 \int_{\Omega_c}dr \sum_i\delta(r-r_i)\:.
\end{align}
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{estimator type=density} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{hamiltonian, qmc}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\textit{None}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{type}$^r$ & text & \textbf{density} & & Must be density \\
& \texttt{name}$^r$ & text & \textit{anything} & any & Unique name for estimator \\
& \texttt{delta}$^o$ & real array(3) & $0\le v_i \le 1$ & 0.1 0.1 0.1 & Grid cell spacing, unit coords\\
& \texttt{x\_min}$^o$ & real & $>0$ & 0 & Grid starting point in x (Bohr)\\
& \texttt{x\_max}$^o$ & real & $>0$ &$|\texttt{lattice[0]}|$& Grid ending point in x (Bohr)\\
& \texttt{y\_min}$^o$ & real & $>0$ & 0 & Grid starting point in y (Bohr)\\
& \texttt{y\_max}$^o$ & real & $>0$ &$|\texttt{lattice[1]}|$& Grid ending point in y (Bohr)\\
& \texttt{z\_min}$^o$ & real & $>0$ & 0 & Grid starting point in z (Bohr)\\
& \texttt{z\_max}$^o$ & real & $>0$ &$|\texttt{lattice[2]}|$& Grid ending point in z (Bohr)\\
& \texttt{potential}$^o$ & boolean & yes/no & no & Accumulate local potential, \textit{Deprecated}\\
& \texttt{debug}$^o$ & boolean & yes/no & no & \textit{No current function}\\
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
Additional information:
\begin{itemize}
\item{\texttt{name}: The name provided will be used as a label in the \texttt{stat.h5} file for the blocked output data. Postprocessing tools expect \texttt{name="Density"}.}
\item{\texttt{delta}: This sets the histogram grid size used to accumulate the density: \texttt{delta="0.1 0.1 0.05"}$\rightarrow 10\times 10\times 20$ grid, \texttt{delta="0.01 0.01 0.01"}$\rightarrow 100\times 100\times 100$ grid. The density grid is written to a \texttt{stat.h5} file at the end of each MC block. If you request many blocks in a \texttt{<qmc/>} element, or select a large grid, the resulting \texttt{stat.h5} file could be many gigabytes in size.}
\item{\texttt{*\_min/*\_max}: Can be used to select a subset of the simulation cell for the density histogram grid. For example, if a (cubic) simulation cell is 20 Bohr on a side, setting \texttt{*\_min=5.0} and \texttt{*\_max=15.0} will result in a density histogram grid spanning a $10\times 10\times 10$ Bohr cube about the center of the box (see the second example below). Use of \texttt{x\_min, x\_max, y\_min, y\_max, z\_min, z\_max} is only appropriate for orthorhombic simulation cells with open boundary conditions.}
\item{When open boundary conditions are used, a \texttt{<simulationcell/>} element must be explicitly provided as the first subelement of \texttt{<qmcsystem/>} for the density estimator to work. In this case, the molecule should be centered around the middle of the simulation cell ($L/2$) and not the origin ($0$), since the space within the cell, and hence the density grid, is defined from $0$ to $L$.}
\end{itemize}
% density estimator
\begin{lstlisting}[style=QMCPXML,caption=Density estimator (uniform grid).]
<estimator name="Density" type="density" delta="0.05 0.05 0.05"/>
\end{lstlisting}
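The following sketch restricts the density grid to a sub-volume, assuming the hypothetical 20 Bohr cubic cell with open boundary conditions described above:
\begin{lstlisting}[style=QMCPXML,caption=Sketch of a density estimator restricted to a sub-volume of the simulation cell.]
  <estimator name="Density" type="density" delta="0.05 0.05 0.05"
             x_min="5.0" x_max="15.0" y_min="5.0" y_max="15.0" z_min="5.0" z_max="15.0"/>
\end{lstlisting}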
\subsection{Spin density estimator}
The spin density is similar to the total density described previously. In this case, the sum over particles is performed independently for each spin component.
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{estimator type=spindensity} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{hamiltonian, qmc}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\textit{None}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{type}$^r$ & text & \textbf{spindensity} & & Must be spindensity \\
& \texttt{name}$^r$ & text & \textit{anything} & any & Unique name for estimator \\
& \texttt{report}$^o$ & boolean & yes/no & no & Write setup details to stdout \\
\multicolumn{2}{l}{parameters} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{grid}$^o$ & integer array(3) & $v_i>0$ & & Grid cell count \\
& \texttt{dr}$^o$ & real array(3) & $v_i>0$ & & Grid cell spacing (Bohr) \\
& \texttt{cell}$^o$ & real array(3,3) & \textit{anything} & & Volume grid exists in \\
& \texttt{corner}$^o$ & real array(3) & \textit{anything} & & Volume corner location \\
& \texttt{center}$^o$ & real array(3) & \textit{anything} & & Volume center/origin location \\
& \texttt{voronoi}$^o$ & text &\texttt{particleset.name}& & \textit{Under development}\\%Ion particleset for Voronoi centers\\
& \texttt{test\_moves}$^o$& integer & $>=0$ & 0 & Test estimator with random moves \\
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
Additional information:
\begin{itemize}
\item{\texttt{name}: The name provided will be used as a label in the \texttt{stat.h5} file for the blocked output data. Postprocessing tools expect \texttt{name="SpinDensity"}.}
\item{\texttt{grid}: The grid sets the dimension of the histogram grid. Input like \texttt{<parameter name="grid"> 40 40 40 </parameter>} requests a $40 \times 40\times 40$ grid. The shape of individual grid cells is commensurate with the supercell shape.}
\item{\texttt{dr}: \texttt{dr} sets the real-space dimensions of the grid cell edges (in Bohr). Input like \texttt{<parameter name="dr"> 0.5 0.5 0.5 </parameter>} in a supercell with axes of length 10 Bohr each (but of arbitrary shape) will produce a $20\times 20\times 20$ grid. The input \texttt{dr} values are rounded to produce an integer number of grid cells along each supercell axis. Either \texttt{grid} or \texttt{dr} must be provided, but not both (an example using \texttt{dr} is given at the end of this subsection).}
\item{\texttt{cell}: When \texttt{cell} is provided, a user-defined grid volume is used instead of the global supercell. This must be provided if open boundary conditions are used. Additionally, if \texttt{cell} is provided, the user must specify where the volume is located in space in addition to its size/shape (\texttt{cell}) using either the \texttt{corner} or \texttt{center} parameters.}
\item{\texttt{corner}: The grid volume is defined as $\texttt{corner}+\sum_{d=1}^3 u_d\,\texttt{cell}_d$ with $0<u_d<1$ (``cell'' refers to either the supercell or user-provided cell).}
\item{\texttt{center}: The grid volume is defined as $\texttt{center}+\sum_{d=1}^3 u_d\,\texttt{cell}_d$ with $-1/2<u_d<1/2$ (``cell'' refers to either the supercell or user-provided cell). \texttt{corner/center} can be used to shift the grid even if \texttt{cell} is not specified. Simultaneous use of \texttt{corner} and \texttt{center} will cause QMCPACK to abort.}
\end{itemize}
% spin density estimators
\begin{lstlisting}[style=QMCPXML,caption=Spin density estimator (uniform grid).]
<estimator type="spindensity" name="SpinDensity" report="yes">
<parameter name="grid"> 40 40 40 </parameter>
</estimator>
\end{lstlisting}
\begin{lstlisting}[style=QMCPXML,caption=Spin density estimator (uniform grid centered about origin).]
<estimator type="spindensity" name="SpinDensity" report="yes">
<parameter name="grid">
20 20 20
</parameter>
<parameter name="center">
0.0 0.0 0.0
</parameter>
<parameter name="cell">
10.0 0.0 0.0
0.0 10.0 0.0
0.0 0.0 10.0
</parameter>
</estimator>
\end{lstlisting}
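Alternatively, the grid can be specified through the cell spacing \texttt{dr} rather than \texttt{grid}; in this sketch the grid volume defaults to the global supercell, and the 0.5 Bohr spacing is an arbitrary choice:
\begin{lstlisting}[style=QMCPXML,caption=Sketch of a spin density estimator specified via grid cell spacing.]
<estimator type="spindensity" name="SpinDensity" report="yes">
  <parameter name="dr"> 0.5 0.5 0.5 </parameter>
</estimator>
\end{lstlisting}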
\subsection{Pair correlation function, $g(r)$}
The functional form of the species-resolved radial pair correlation function operator is
\begin{align}
g_{ss'}(r) = \frac{V}{4\pi r^2N_sN_{s'}}\sum_{i_s=1}^{N_s}\sum_{j_{s'}=1}^{N_{s'}}\delta(r-|r_{i_s}-r_{j_{s'}}|)\:,
\end{align}
where $N_s$ is the number of particles of species $s$ and $V$ is the supercell volume. If $s=s'$, then the sum is restricted so that $i_s\ne j_s$.
In QMCPACK, an estimate of $g_{ss'}(r)$ is obtained as a radial histogram with a set of $N_b$ uniform bins of width $\delta r$. This can be expressed analytically as
\begin{align}
\tilde{g}_{ss'}(r) = \frac{V}{4\pi r^2N_sN_{s'}}\sum_{i=1}^{N_s}\sum_{j=1}^{N_{s'}}\frac{1}{\delta r}\int_{r-\delta r/2}^{r+\delta r/2}dr'\delta(r'-|r_{si}-r_{s'j}|)\:,
\end{align}
where the radial coordinate $r$ is restricted to reside at the bin centers, $\delta r/2, 3 \delta r/2, 5 \delta r/2, \ldots$.
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\linewidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{estimator type=gofr} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{hamiltonian, qmc}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\textit{None}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{type}$^r$ & text & \textbf{gofr} & & Must be gofr \\
& \texttt{name}$^o$ & text & \textit{anything} & any & \textit{No current function} \\
& \texttt{num\_bin}$^r$& integer & $>1$ & 20 & \# of histogram bins \\
& \texttt{rmax}$^o$ & real & $>0$ & 10 & Histogram extent (Bohr) \\
& \texttt{dr}$^o$ & real & $>0$ & 0.5 & \textit{No current function} \\%Histogram bin width (Bohr) \\
& \texttt{debug}$^o$ & boolean & yes/no & no & \textit{No current function} \\
& \texttt{target}$^o$ & text &\texttt{particleset.name}&\texttt{hamiltonian.target}& Quantum particles \\
& \texttt{source/sources}$^o$& text array &\texttt{particleset.name}&\texttt{hamiltonian.target}& Classical particles\\
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
Additional information:
\begin{itemize}
\item{\texttt{num\_bin:} This is the number of bins in each species pair radial histogram.}
\item{\texttt{rmax:} This is the maximum pair distance included in the histogram. The uniform bin width is $\delta r=\texttt{rmax/num\_bin}$. If periodic boundary conditions are used for any dimension of the simulation cell, then the default value of \texttt{rmax} is the simulation cell radius instead of 10 Bohr. For open boundary conditions, the volume ($V$) used is 1.0 Bohr$^3$.}
\item{\texttt{source/sources:} If unspecified, only pair correlations between each species of quantum particle will be measured. For each classical particleset specified by \texttt{source/sources}, additional pair correlations between each quantum and classical species will be measured. Typically there is only one classical particleset (e.g., \texttt{source="ion0"}), but there can be several in principle (e.g., \texttt{sources="ion0 ion1 ion2"}).}
\item{\texttt{target:} The default value is the preferred usage (i.e., \texttt{target} does not need to be provided).}
\item{Data is output to the \texttt{stat.h5} file for each QMC subrun. Individual histograms are named according to the quantum particleset and index of the pair. For example, if the quantum particleset is named ``e'' and there are two species (up and down electrons, say), then there will be three sets of histogram data in each \texttt{stat.h5} file named \texttt{gofr\_e\_0\_0}, \texttt{gofr\_e\_0\_1}, and \texttt{gofr\_e\_1\_1} for up-up, up-down, and down-down correlations, respectively.}
\end{itemize}
\begin{lstlisting}[style=QMCPXML,caption=Pair correlation function estimator element.]
<estimator type="gofr" name="gofr" num_bin="200" rmax="3.0" />
\end{lstlisting}
\begin{lstlisting}[style=QMCPXML,caption=Pair correlation function estimator element with additional electron-ion correlations.]
<estimator type="gofr" name="gofr" num_bin="200" rmax="3.0" source="ion0" />
\end{lstlisting}
\subsection{Static structure factor, $S(k)$}
Let $\rho^e_{\mathbf{k}}=\sum_j e^{i \mathbf{k}\cdot\mathbf{r}_j^e}$ be the Fourier-space electron density, where $\mathbf{r}^e_j$ is the coordinate of the $j$th electron and $\mathbf{k}$ is a wavevector commensurate with the simulation cell. QMCPACK allows the user to accumulate the static electron structure factor $S(\mathbf{k})$ at all commensurate $\mathbf{k}$ such that $|\mathbf{k}| \leq (LR\_DIM\_CUTOFF) r_c$, where \texttt{LR\_DIM\_CUTOFF} is the optimized breakup parameter and $r_c$ is the Wigner-Seitz radius. With $N^e$ the number of electrons, the structure factor is defined as follows:
\begin{equation}
S(\mathbf{k}) = \frac{1}{N^e}\langle \rho^e_{-\mathbf{k}} \rho^e_{\mathbf{k}} \rangle\:.
\end{equation}
% has a CUDA counterpart, may be useful to understand difference between cpu and gpu estimators
% see HamiltonianFactory.cpp
% SkEstimator_CUDA* apot=new SkEstimator_CUDA(*targetPtcl);
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{estimator type=sk} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{hamiltonian, qmc}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\textit{None}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{type}$^r$ & text & sk & & Must be sk \\
& \texttt{name}$^r$ & text & \textit{anything} & any & Unique name for estimator \\
& \texttt{hdf5}$^o$ & boolean & yes/no & no & Output to \texttt{stat.h5} (yes) or \texttt{scalar.dat} (no) \\
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
Additional information:
\begin{itemize}
\item{\texttt{name:} This is the unique name for this estimator instance. A data structure of the same name will appear in \texttt{stat.h5} output files.}
\item{\texttt{hdf5:} If \texttt{hdf5==yes}, output data for $S(k)$ is directed to the \texttt{stat.h5} file (recommended usage). If \texttt{hdf5==no}, the data is instead routed to the \texttt{scalar.dat} file, resulting in many columns of data with headings prefixed by \texttt{name} and postfixed by the k-point index (e.g., \texttt{sk\_0 sk\_1 \ldots sk\_1037 \ldots}).}
\item{This estimator only works in periodic boundary conditions. Its presence in the input file is ignored otherwise.}
\item{This is not a species-resolved structure factor. Additionally, for $\mathbf{k}$ vectors commensurate with the unit cell, $S(\mathbf{k})$ will include contributions from the static electronic density, meaning it will not accurately measure the electron-electron density response.}
\end{itemize}
\begin{lstlisting}[style=QMCPXML,caption=Static structure factor estimator element.]
<estimator type="sk" name="sk" hdf5="yes"/>
\end{lstlisting}
\subsection{Static structure factor, \texttt{SkAll}}
To compute the finite-size correction to the potential energy, a record of $\rho(\mathbf{k})$ is required. What sets \texttt{SkAll} apart from \texttt{sk} is that \texttt{SkAll} records $\rho(\mathbf{k})$ in addition to $S(\mathbf{k})$.
% has a CUDA counterpart, may be useful to understand difference between cpu and gpu estimators
% see HamiltonianFactory.cpp
% SkEstimator_CUDA* apot=new SkEstimator_CUDA(*targetPtcl);
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{estimator type=SkAll} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{hamiltonian, qmc}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\textit{None}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
  & \texttt{type}$^r$     & text     & skall             &         & Must be skall                  \\
& \texttt{name}$^r$ & text & \textit{anything} & any & Unique name for estimator \\
  & \texttt{source}$^r$ & text & \texttt{particleset.name} & None & Ion \texttt{particleset} name \\
  & \texttt{target}$^r$ & text & \texttt{particleset.name} & None & Electron \texttt{particleset} name \\
& \texttt{hdf5}$^o$ & boolean & yes/no & no & Output to \texttt{stat.h5} (yes) or \texttt{scalar.dat} (no) \\
  & \texttt{writeionion}$^o$ & boolean & yes/no & no & Write the file \texttt{rhok\_IonIon.dat} containing $S(\mathbf{k})$ for the ions \\
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
Additional information:
\begin{itemize}
\item{\texttt{name:} This is the unique name for this estimator instance. A data structure of the same name will appear in \texttt{stat.h5} output files.}
\item{\texttt{hdf5:} If \texttt{hdf5==yes}, output data is directed to the \texttt{stat.h5} file (recommended usage). If \texttt{hdf5==no}, the data is instead routed to the \texttt{scalar.dat} file, resulting in many columns of data with headings prefixed by \texttt{rhok} and postfixed by the k-point index.}
\item{This estimator only works in periodic boundary conditions. Its presence in the input file is ignored otherwise.}
\item{This is not a species-resolved structure factor. Additionally, for $\mathbf{k}$ vectors commensurate with the unit cell, $S(\mathbf{k})$ will include contributions from the static electronic density, meaning it will not accurately measure the electron-electron density response.}
\end{itemize}
\begin{lstlisting}[style=QMCPXML,caption=SkAll estimator element.]
<estimator type="skall" name="SkAll" source="ion0" target="e" hdf5="yes"/>
\end{lstlisting}
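If the ion-ion structure factor is also desired, the \texttt{writeionion} attribute listed in the preceding table can be enabled; this sketch simply extends the previous example:
\begin{lstlisting}[style=QMCPXML,caption=Sketch of an SkAll estimator that also writes the ion-ion structure factor.]
<estimator type="skall" name="SkAll" source="ion0" target="e" hdf5="yes" writeionion="yes"/>
\end{lstlisting}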
\subsection{Species kinetic energy}
Records the species-resolved kinetic energy instead of the total kinetic energy reported in the \verb|Kinetic| column of \texttt{scalar.dat}. \verb|SpeciesKineticEnergy| is arguably the simplest estimator in QMCPACK. The implementation of this estimator is detailed in \verb|manual/estimator/estimator_implementation.pdf|.
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{estimator type=specieskinetic} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{hamiltonian, qmc}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\textit{None}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{type}$^r$ & text & specieskinetic & & Must be specieskinetic \\
& \texttt{name}$^r$ & text & \textit{anything} & any & Unique name for estimator \\
& \texttt{hdf5}$^o$ & boolean & yes/no & no & Output to \texttt{stat.h5} (yes) \\
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
\begin{lstlisting}[style=QMCPXML,caption=Species kinetic energy estimator element.]
<estimator type="specieskinetic" name="skinetic" hdf5="no"/>
\end{lstlisting}
\subsection{Lattice deviation estimator}
Records the deviation of a group of particles in one particle set (\texttt{target}) from a group of particles in another particle set (\texttt{source}).
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{estimator type=latticedeviation} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{hamiltonian, qmc}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\textit{None}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{type}$^r$ & text & latticedeviation & & Must be latticedeviation \\
& \texttt{name}$^r$ & text & \textit{anything} & any & Unique name for estimator \\
& \texttt{hdf5}$^o$ & boolean & yes/no & no & Output to \texttt{stat.h5} (yes) \\
& \texttt{per\_xyz}$^o$ & boolean & yes/no & no & Directionally resolved (yes) \\
& \texttt{source}$^r$ & text & e/ion0/\dots & no & source particleset \\
& \texttt{sgroup}$^r$ & text & u/d/\dots & no & source particle group \\
& \texttt{target}$^r$ & text & e/ion0/\dots & no & target particleset \\
& \texttt{tgroup}$^r$ & text & u/d/\dots & no & target particle group \\
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
Additional information:
\begin{itemize}
\item{\texttt{source}: The ``reference'' particleset to measure distances from; actual reference points are determined together with \verb|sgroup|.}
\item{\texttt{sgroup}: The ``reference'' particle group to measure distances from.}
\item{\texttt{target}: The ``target'' particleset to measure distances to.}
\item{\texttt{tgroup}: The ``target'' particle group to measure distances to. For example, in Listing~\ref{lst:latdev}, the distance from the up electron (``u'') to the origin of the coordinate system is recorded.}
\item{\texttt{per\_xyz}: Used to record direction-resolved distance. In Listing~\ref{lst:latdev}, the x,y,z coordinates of the up electron will be recorded separately if \texttt{per\_xyz=yes}.}
\item{\texttt{hdf5}: Used to record particle-resolved distances in the \texttt{stat.h5} file if \texttt{hdf5=yes}.}
\end{itemize}
\begin{lstlisting}[style=QMCPXML,caption={Lattice deviation estimator element.},label={lst:latdev}]
<particleset name="e" random="yes">
<group name="u" size="1" mass="1.0">
<parameter name="charge" > -1 </parameter>
<parameter name="mass" > 1.0 </parameter>
</group>
<group name="d" size="1" mass="1.0">
<parameter name="charge" > -1 </parameter>
<parameter name="mass" > 1.0 </parameter>
</group>
</particleset>
<particleset name="wf_center">
<group name="origin" size="1">
<attrib name="position" datatype="posArray" condition="0">
0.00000000 0.00000000 0.00000000
</attrib>
</group>
</particleset>
<estimator type="latticedeviation" name="latdev" hdf5="yes" per_xyz="yes"
source="wf_center" sgroup="origin" target="e" tgroup="u"/>
\end{lstlisting}
\subsection{Energy density estimator}
An energy density operator, $\hat{\mathcal{E}}_r$, satisfies
\begin{align}
\int dr \hat{\mathcal{E}}_r = \hat{H},
\end{align}
where the integral is over all space and $\hat{H}$ is the Hamiltonian. In \qmcpack, the energy density is split into kinetic and potential components
\begin{align}
\hat{\mathcal{E}}_r = \hat{\mathcal{T}}_r + \hat{\mathcal{V}}_r\:,
\end{align}
with each component given by
\begin{align}
\hat{\mathcal{T}}_r &= \frac{1}{2}\sum_i\delta(r-r_i)\hat{p}_i^2 \\
\hat{\mathcal{V}}_r &= \sum_{i<j}\frac{\delta(r-r_i)+\delta(r-r_j)}{2}\hat{v}^{ee}(r_i,r_j)
+ \sum_{i\ell}\frac{\delta(r-r_i)+\delta(r-\tilde{r}_\ell)}{2}\hat{v}^{eI}(r_i,\tilde{r}_\ell) \nonumber\\
&\qquad + \sum_{\ell< m}\frac{\delta(r-\tilde{r}_\ell)+\delta(r-\tilde{r}_m)}{2}\hat{v}^{II}(\tilde{r}_\ell,\tilde{r}_m)\:.\nonumber
\end{align}
Here, $r_i$ and $\tilde{r}_\ell$ represent electron and ion positions, respectively; $\hat{p}_i$ is a single electron momentum operator; and $\hat{v}^{ee}(r_i,r_j)$, $\hat{v}^{eI}(r_i,\tilde{r}_\ell)$, and $\hat{v}^{II}(\tilde{r}_\ell,\tilde{r}_m)$ are the electron-electron, electron-ion, and ion-ion pair potential operators (including nonlocal pseudopotentials, if present). This form of the energy density is size consistent; that is, the partially integrated energy density operators of well-separated atoms give the isolated Hamiltonians of the respective atoms. For periodic systems with twist-averaged boundary conditions, the energy density is formally correct only for either a set of supercell k-points that correspond to real-valued wavefunctions or a k-point set that has inversion symmetry around a k-point having a real-valued wavefunction. For more information about the energy density, see Ref.~\cite{Krogel2013}.
In \qmcpack, the energy density can be accumulated on piecewise uniform 3D grids in generalized Cartesian, cylindrical, or spherical coordinates. The energy density integrated within Voronoi volumes centered on ion positions is also available. The total particle number density is also accumulated on the same grids by the energy density estimator for convenience so that related quantities, such as the regional energy per particle, can be computed easily.
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{estimator type=EnergyDensity} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{hamiltonian, qmc}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\texttt{reference\_points, spacegrid}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{type}$^r$ & text & \textbf{EnergyDensity} & & Must be EnergyDensity \\
& \texttt{name}$^r$ & text & \textit{anything} & & Unique name for estimator \\
& \texttt{dynamic}$^r$ & text & \texttt{particleset.name} & & Identify electrons \\
& \texttt{static}$^o$ & text & \texttt{particleset.name} & & Identify ions \\
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
Additional information:
\begin{itemize}
\item{\texttt{name:} Must be unique. A dataset with blocked statistical data for the energy density will appear in the \texttt{stat.h5} files labeled as \texttt{name}.}
\end{itemize}
\begin{lstlisting}[style=QMCPXML,caption=Energy density estimator accumulated on a $20 \times 10 \times 10$ grid over the simulation cell.]
<estimator type="EnergyDensity" name="EDcell" dynamic="e" static="ion0">
<spacegrid coord="cartesian">
<origin p1="zero"/>
<axis p1="a1" scale=".5" label="x" grid="-1 (.05) 1"/>
<axis p1="a2" scale=".5" label="y" grid="-1 (.1) 1"/>
<axis p1="a3" scale=".5" label="z" grid="-1 (.1) 1"/>
</spacegrid>
</estimator>
\end{lstlisting}
\begin{lstlisting}[style=QMCPXML,caption=Energy density estimator accumulated within spheres of radius 6.9 Bohr centered on the first and second atoms in the ion0 particleset.]
<estimator type="EnergyDensity" name="EDatom" dynamic="e" static="ion0">
<reference_points coord="cartesian">
r1 1 0 0
r2 0 1 0
r3 0 0 1
</reference_points>
<spacegrid coord="spherical">
<origin p1="ion01"/>
<axis p1="r1" scale="6.9" label="r" grid="0 1"/>
<axis p1="r2" scale="6.9" label="phi" grid="0 1"/>
<axis p1="r3" scale="6.9" label="theta" grid="0 1"/>
</spacegrid>
<spacegrid coord="spherical">
<origin p1="ion02"/>
<axis p1="r1" scale="6.9" label="r" grid="0 1"/>
<axis p1="r2" scale="6.9" label="phi" grid="0 1"/>
<axis p1="r3" scale="6.9" label="theta" grid="0 1"/>
</spacegrid>
</estimator>
\end{lstlisting}
\begin{lstlisting}[style=QMCPXML,caption=Energy density estimator accumulated within Voronoi polyhedra centered on the ions.]
<estimator type="EnergyDensity" name="EDvoronoi" dynamic="e" static="ion0">
<spacegrid coord="voronoi"/>
</estimator>
\end{lstlisting}
The \texttt{<reference\_points/>} element provides a set of points for later use in specifying the origin and coordinate axes needed to construct a spatial histogramming grid. Several reference points on the surface of the simulation cell (see Table~\ref{tab:ref_points}), as well as the positions of the ions (see the \texttt{energydensity.static} attribute), are made available by default. The reference points can be used, for example, to construct a cylindrical grid along a bond with the origin on the bond center.
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{reference\_points} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{estimator type=EnergyDensity}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\textit{None}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
 & \texttt{coord}$^r$   & text     & cartesian/cell    &                 & Specify coordinate system \\
\multicolumn{2}{l}{body text} & \multicolumn{4}{l}{}\\
& & \multicolumn{4}{l}{The body text is a line formatted list of points with labels} \\
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
Additional information
\begin{itemize}
\item{\texttt{coord:} If \texttt{coord=cartesian}, labeled points are in Cartesian (x,y,z) format in units of Bohr. If \texttt{coord=cell}, then labeled points are in units of the simulation cell axes.}
\item{\texttt{body text:} The list of points provided in the body text is line formatted, with four entries per line (\textit{label} \textit{coor1} \textit{coor2} \textit{coor3}). A set of points referenced to the simulation cell is available by default (see Table~\ref{tab:ref_points}). If \texttt{energydensity.static} is provided, the location of each individual ion is also available (e.g., if \texttt{energydensity.static=ion0}, then the location of the first atom is available with label ion01, the second with ion02, etc.). All points can be used by label when constructing spatial histogramming grids (see the following \texttt{spacegrid} element) used to collect energy densities. An example of user-defined points is given after this list.}
\end{itemize}
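For illustration, a \texttt{reference\_points} element defining two custom points in units of the simulation cell axes might look as follows (the labels \texttt{site\_a} and \texttt{bond\_mid} and their coordinates are arbitrary choices made for this sketch):
\begin{lstlisting}[style=QMCPXML,caption=User-defined reference points given in units of the cell axes.]
<reference_points coord="cell">
  site_a   0.25 0.00 0.00
  bond_mid 0.25 0.25 0.00
</reference_points>
\end{lstlisting}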
\FloatBarrier
\begin{table}[h]
\begin{center}
\caption{Reference points available by default. Vectors $a_1$, $a_2$, and $a_3$ refer to the simulation cell axes. The representation of the cell is centered around \texttt{zero}.\label{tab:ref_points}}
\begin{tabular}{l l l}
\hline
\texttt{label} & \texttt{point} & \texttt{description} \\
\hline
\texttt{zero} & 0 0 0 & Cell center \\
\texttt{a1} & $a_1$ & Cell axis 1 \\
\texttt{a2} & $a_2$ & Cell axis 2 \\
\texttt{a3} & $a_3$ & Cell axis 3 \\
\texttt{f1p} & $a_1$/2 & Cell face 1+ \\
\texttt{f1m} & -$a_1$/2 & Cell face 1- \\
\texttt{f2p} & $a_2$/2 & Cell face 2+ \\
\texttt{f2m} & -$a_2$/2 & Cell face 2- \\
\texttt{f3p} & $a_3$/2 & Cell face 3+ \\
\texttt{f3m} & -$a_3$/2 & Cell face 3- \\
\texttt{cppp} & $(a_1+a_2+a_3)/2$ & Cell corner +,+,+ \\
\texttt{cppm} & $(a_1+a_2-a_3)/2$ & Cell corner +,+,- \\
\texttt{cpmp} & $(a_1-a_2+a_3)/2$ & Cell corner +,-,+ \\
\texttt{cmpp} & $(-a_1+a_2+a_3)/2$ & Cell corner -,+,+ \\
\texttt{cpmm} & $(a_1-a_2-a_3)/2$ & Cell corner +,-,- \\
\texttt{cmpm} & $(-a_1+a_2-a_3)/2$ & Cell corner -,+,- \\
\texttt{cmmp} & $(-a_1-a_2+a_3)/2$ & Cell corner -,-,+ \\
\texttt{cmmm} & $(-a_1-a_2-a_3)/2$ & Cell corner -,-,- \\
\hline
\end{tabular}
\end{center}
\end{table}
\FloatBarrier
The \texttt{<spacegrid/>} element is used to specify a spatial histogramming grid for the energy density. Grids are constructed from a set of user-provided coordinate axes, which may be nonorthogonal. The axes are based on information available from \texttt{reference\_points}. Voronoi grids are based only on nearest-neighbor distances between electrons and ions. Any number of space grids can be provided to a single energy density estimator.
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{spacegrid} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{estimator type=EnergyDensity}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\texttt{origin, axis}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
 & \texttt{coord}$^r$   & text     & cartesian         &                 & Specify coordinate system \\
 &                      &          & cylindrical       &                 &  \\
 &                      &          & spherical         &                 &  \\
 &                      &          & voronoi           &                 &  \\
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
The \texttt{<origin/>} element gives the location of the origin for a non-Voronoi grid.\\
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{origin} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{spacegrid}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\textit{None}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{p1}$^r$ & text & \texttt{reference\_point.label} & & Select end point \\
& \texttt{p2}$^o$ & text & \texttt{reference\_point.label} & & Select end point \\
& \texttt{fraction}$^o$& real & & 0 & Interpolation fraction \\
\hline
\end{tabularx}
\end{center}
\end{table}
Additional information:
\begin{itemize}
\item{\texttt{p1/p2/fraction:} The location of the origin is set to \texttt{p1+fraction*(p2-p1)}. If only \texttt{p1} is provided, the origin is at \texttt{p1}. The example below shows how this can be used to place the origin at a bond midpoint.}
\end{itemize}
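For example, assuming \texttt{energydensity.static=ion0} so that the ion labels \texttt{ion01} and \texttt{ion02} are available (see the \texttt{reference\_points} discussion above), the following line inside a \texttt{<spacegrid/>} element places the origin at the midpoint of the bond between the first two ions:
\begin{lstlisting}[style=QMCPXML,caption=Origin placed halfway between the first two ions.]
<origin p1="ion01" p2="ion02" fraction="0.5"/>
\end{lstlisting}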
\FloatBarrier
The \texttt{<axis/>} element represents a coordinate axis used to construct the (possibly curved) coordinate system for the histogramming grid. Three \texttt{<axis/>} elements must be provided to a non-Voronoi \texttt{<spacegrid/>} element.
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{axis} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{spacegrid}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\textit{None}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{label}$^r$ & text & \textit{See below}& & Axis/dimension label \\
& \texttt{grid}$^r$ & text & & ``0 1" & Grid ranges/intervals \\
& \texttt{p1}$^r$ & text & \texttt{reference\_point.label} & & Select end point \\
& \texttt{p2}$^o$ & text & \texttt{reference\_point.label} & & Select end point \\
& \texttt{scale}$^o$ & real & & & Interpolation fraction\\
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
Additional information:
\begin{itemize}
\item{\texttt{label:} The allowed set of axis labels depends on the coordinate system (i.e., \texttt{spacegrid.coord}). Labels are \texttt{x/y/z} for \texttt{coord=cartesian}, \texttt{r/phi/z} for \texttt{coord=cylindrical}, and \texttt{r/phi/theta} for \texttt{coord=spherical}.}
\item{\texttt{p1/p2/scale:} The axis vector is set to \texttt{p1+scale*(p2-p1)}. If only \texttt{p1} is provided, the axis vector is \texttt{p1}.}
\item{\texttt{grid:} The grid specifies the histogram grid along the direction specified by \texttt{label}. The allowed grid points fall in the range [-1,1] for \texttt{label=x/y/z} or [0,1] for \texttt{r/phi/theta}. A grid of 10 evenly spaced points between 0 and 1 can be requested equivalently by \texttt{grid="0 (0.1) 1"} or \texttt{grid="0 (10) 1"}. Piecewise uniform grids covering portions of the range are supported, e.g., \texttt{grid="-0.7 (10) 0.0 (20) 0.5"} (see the example following this list). }
\item{Note that \texttt{grid} specifies the histogram grid along the (curved) coordinate given by \texttt{label}. The axis specified by \texttt{p1/p2/scale} does not correspond one-to-one with \texttt{label} unless \texttt{label=x/y/z}, but the full set of axes provided defines the (sheared) space on top of which the curved (e.g., spherical) coordinate system is built. }
\end{itemize}
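As a sketch of the grid syntax described above (using the same half-cell scaling of the cell axis \texttt{a1} as in the earlier Cartesian example), an axis carrying a piecewise uniform histogram grid along the x direction could be written as follows:
\begin{lstlisting}[style=QMCPXML,caption=Axis with a piecewise uniform grid along the x direction.]
<axis p1="a1" scale=".5" label="x" grid="-0.7 (10) 0.0 (20) 0.5"/>
\end{lstlisting}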
\subsection{One body density matrix}
The N-body density matrix in DMC is $\hat{\rho}_N=\operator{\Psi_{T}}{}{\Psi_{FN}}$ (for VMC, substitute $\Psi_T$ for $\Psi_{FN}$). The one body reduced density matrix (1RDM) is obtained by tracing out all particle coordinates but one:
\begin{align}
\hat{n}_1 &= \sum_nTr_{R_n}\operator{\Psi_{T}}{}{\Psi_{FN}}\:.
\end{align}
In this formula, the sum is over all electron indices and $Tr_{R_n}(*)\equiv\int dR_n\expval{R_n}{*}{R_n}$ with $R_n=[r_1,...,r_{n-1},r_{n+1},...,r_N]$. When the sum is restricted to spin-up or spin-down electrons, one obtains a density matrix for each spin species. The 1RDM computed by \qmcpack is partitioned in this way.
In real space, the matrix elements of the 1RDM are
\begin{align}
n_1(r,r') &= \expval{r}{\hat{n}_1}{r'} = \sum_n\int dR_n \Psi_T(r,R_n)\Psi_{FN}^*(r',R_n)\:.
\end{align}
A more efficient and compact representation of the 1RDM is obtained by expanding in the SPOs obtained from a Hartree-Fock or DFT calculation, $\{\phi_i\}$:
\begin{align}\label{eq:dm1b_direct}
n_1(i,j) &= \expval{\phi_i}{\hat{n}_1}{\phi_j} \nonumber \\
&= \int dR \Psi_{FN}^*(R)\Psi_{T}(R) \sum_n\int dr'_n \frac{\Psi_T(r_n',R_n)}{\Psi_T(r_n,R_n)}\phi_i(r_n')^* \phi_j(r_n)\:.
\end{align}
The integration over $r'$ in Eq.~\ref{eq:dm1b_direct} is inefficient when one is also interested in obtaining matrices involving energetic quantities, such as the energy density matrix of Ref.~\cite{Krogel2014} or the related (and better-known) generalized Fock matrix. For this reason, an approximation is introduced as follows:
\begin{align}
n_1(i,j) \approx \int dR \Psi_{FN}(R)^*\Psi_T(R) \sum_n \int dr_n' \frac{\Psi_T(r_n',R_n)^*}{\Psi_T(r_n,R_n)^*}\phi_i(r_n)^* \phi_j(r_n')\:.
\end{align}
For VMC, FN-DMC, FP-DMC, and RN-DMC this formula represents an exact sampling of the 1RDM corresponding to $\hat{\rho}_N^\dagger$ (see appendix A of Ref.~\cite{Krogel2014} for more detail).
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{estimator type=dm1b} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{hamiltonian, qmc}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\textit{none}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{type}$^r$ & text & \textbf{dm1b} & & Must be dm1b \\
& \texttt{name}$^r$ & text & \textit{anything}& & Unique name for estimator \\
\multicolumn{2}{l}{parameters} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
&\texttt{basis}$^r$ & text array & sposet.name(s) & & Orbital basis \\
&\texttt{integrator}$^o$ & text & uniform\_grid & uniform\_grid & Integration method \\
& & & uniform & & \\
& & & density & & \\
&\texttt{evaluator}$^o$ & text & loop/matrix & loop & Evaluation method \\
&\texttt{scale}$^o$ & real & $0<scale<1$ & 1.0 & Scale integration cell\\
&\texttt{center}$^o$ & real array(3)&\textit{any point}& & Center of cell \\
&\texttt{points}$^o$ & integer & $>0$ & 10 & Grid points in each dim\\
&\texttt{samples}$^o$ & integer & $>0$ & 10 & MC samples \\
&\texttt{warmup}$^o$ & integer & $>0$ & 30 & MC warmup \\
&\texttt{timestep}$^o$ & real & $>0$ & 0.5 & MC time step \\
&\texttt{use\_drift}$^o$ & boolean & yes/no & no & Use drift in VMC \\
&\texttt{check\_overlap}$^o$& boolean & yes/no & no & Print overlap matrix \\
&\texttt{check\_derivatives}$^o$& boolean & yes/no & no & Check density derivatives \\
&\texttt{acceptance\_ratio}$^o$& boolean & yes/no & no & Print accept ratio \\
&\texttt{rstats}$^o$ & boolean & yes/no & no & Print spatial stats \\
&\texttt{normalized}$^o$ & boolean & yes/no & yes & \texttt{basis} comes norm'ed \\
&\texttt{volume\_normed}$^o$& boolean & yes/no & yes & \texttt{basis} norm is volume \\
&\texttt{energy\_matrix}$^o$& boolean & yes/no & no & Energy density matrix \\
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
Additional information:
\begin{itemize}
\item{\texttt{name:} Density matrix results appear in \texttt{stat.h5} files labeled according to \texttt{name}.}
\item{\texttt{basis:} List \texttt{sposet.name}'s. The total set of orbitals contained in all \texttt{sposet}'s comprises the basis (subspace) onto which the one body density matrix is projected. This set of orbitals generally includes many virtual orbitals that are not occupied in a single reference Slater determinant.}
\item{\texttt{integrator:} Select the method used to perform the additional single-particle integration. Options are \texttt{uniform\_grid} (uniform grid of points over the cell), \texttt{uniform} (uniform random sampling over the cell), and \texttt{density} (Metropolis sampling of the approximate density $\sum_{b\in \texttt{basis}}\abs{\phi_b}^2$; this option is not well tested, so please check results carefully). Depending on the integrator selected, different subsets of the other input parameters are active.}
\item{\texttt{evaluator:} Select for-loop or matrix multiply implementations. Matrix is preferred for speed. Both implementations should give the same results, but please check as this has not been exhaustively tested.}
\item{\texttt{scale:} Resize the simulation cell by scale for use as an integration volume (active for \texttt{integrator=uniform/uniform\_grid}).}
\item{\texttt{center:} Translate the integration volume to center at this point (active for \texttt{integrator=uniform/uniform\_grid}). If \texttt{center} is not provided, the scaled simulation cell is used as is. }
\item{\texttt{points:} Number of grid points in each dimension for \texttt{integrator=uniform\_grid}. For example, \texttt{points=10} results in a uniform $10 \times 10 \times 10$ grid over the cell.}
\item{\texttt{samples:} Sets the number of MC samples collected for each step (active for \texttt{integrator=uniform/density}). }
\item{\texttt{warmup:} Number of warmup Metropolis steps at the start of the run before data collection (active for \texttt{integrator=density}). }
\item{\texttt{timestep:} Drift-diffusion time step used in Metropolis sampling (active for \texttt{integrator=density}).}
\item{\texttt{use\_drift:} Enable drift in Metropolis sampling (active for \texttt{integrator=density}).}
\item{\texttt{check\_overlap:} Print the overlap matrix (computed via simple Riemann sums) to the log, then abort. Note that subsequent analysis based on the 1RDM is simplest if the input orbitals are orthogonal.}
\item{\texttt{check\_derivatives:} Print analytic and numerical derivatives of the approximate (sampled) density for several sample points, then abort. }
\item{\texttt{acceptance\_ratio:} Print the acceptance ratio of the density sampling to the log for each step.}
\item{\texttt{rstats:} Print statistical information about the spatial motion of the sampled points to the log for each step.}
\item{\texttt{normalized:} Declare whether the input orbitals are normalized. If \texttt{normalized=no}, direct Riemann integration over a $200 \times 200 \times 200$ grid will be used to compute the normalizations before use.}
\item{\texttt{volume\_normed:} Declare whether the input orbitals are normalized to the cell volume (default) or not (a norm of 1.0 is assumed in this case). Currently, B-spline orbitals coming from QE and HEG planewave orbitals native to QMCPACK are known to be volume normalized.}
\item{\texttt{energy\_matrix:} Accumulate the one body reduced energy density matrix and write it to \texttt{stat.h5}. This matrix is not covered in any detail here; the interested reader is referred to Ref.~\cite{Krogel2014}. An example input enabling this option is given below.}
\end{itemize}
\begin{lstlisting}[style=QMCPXML,caption=One body density matrix with uniform grid integration.]
<estimator type="dm1b" name="DensityMatrices">
<parameter name="basis" > spo_u spo_uv </parameter>
<parameter name="evaluator" > matrix </parameter>
<parameter name="integrator" > uniform_grid </parameter>
<parameter name="points" > 4 </parameter>
<parameter name="scale" > 1.0 </parameter>
<parameter name="center" > 0 0 0 </parameter>
</estimator>
\end{lstlisting}
\begin{lstlisting}[style=QMCPXML,caption=One body density matrix with uniform sampling.]
<estimator type="dm1b" name="DensityMatrices">
<parameter name="basis" > spo_u spo_uv </parameter>
<parameter name="evaluator" > matrix </parameter>
<parameter name="integrator" > uniform </parameter>
<parameter name="samples" > 64 </parameter>
<parameter name="scale" > 1.0 </parameter>
<parameter name="center" > 0 0 0 </parameter>
</estimator>
\end{lstlisting}
\begin{lstlisting}[style=QMCPXML,caption=One body density matrix with density sampling.]
<estimator type="dm1b" name="DensityMatrices">
<parameter name="basis" > spo_u spo_uv </parameter>
<parameter name="evaluator" > matrix </parameter>
<parameter name="integrator" > density </parameter>
<parameter name="samples" > 64 </parameter>
<parameter name="timestep" > 0.5 </parameter>
<parameter name="use_drift" > no </parameter>
</estimator>
\end{lstlisting}
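The energy density matrix accumulation described above (the \texttt{energy\_matrix} parameter; see Ref.~\cite{Krogel2014}) is enabled by adding the corresponding flag. The following input is a minimal sketch that simply extends the uniform grid example:
\begin{lstlisting}[style=QMCPXML,caption=One body density matrix with the energy density matrix also accumulated.]
<estimator type="dm1b" name="DensityMatrices">
  <parameter name="basis"         > spo_u spo_uv </parameter>
  <parameter name="evaluator"     > matrix       </parameter>
  <parameter name="integrator"    > uniform_grid </parameter>
  <parameter name="points"        > 4            </parameter>
  <parameter name="scale"         > 1.0          </parameter>
  <parameter name="center"        > 0 0 0        </parameter>
  <parameter name="energy_matrix" > yes          </parameter>
</estimator>
\end{lstlisting}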
\begin{lstlisting}[style=QMCPXML,caption={Example \texttt{sposet} initialization for density matrix use. Occupied and virtual orbital sets are created separately, then joined (\texttt{basis="spo\_u spo\_uv"}).}]
<sposet_builder type="bspline" href="../dft/pwscf_output/pwscf.pwscf.h5" tilematrix="1 0 0 0 1 0 0 0 1" twistnum="0" meshfactor="1.0" gpu="no" precision="single">
<sposet type="bspline" name="spo_u" group="0" size="4"/>
<sposet type="bspline" name="spo_d" group="0" size="2"/>
<sposet type="bspline" name="spo_uv" group="0" index_min="4" index_max="10"/>
</sposet_builder>
\end{lstlisting}
\begin{lstlisting}[style=QMCPXML,caption={Example \texttt{sposet} initialization for density matrix use. Density matrix orbital basis created separately (\texttt{basis="dm\_basis"}).}]
<sposet_builder type="bspline" href="../dft/pwscf_output/pwscf.pwscf.h5" tilematrix="1 0 0 0 1 0 0 0 1" twistnum="0" meshfactor="1.0" gpu="no" precision="single">
<sposet type="bspline" name="spo_u" group="0" size="4"/>
<sposet type="bspline" name="spo_d" group="0" size="2"/>
<sposet type="bspline" name="dm_basis" size="50" spindataset="0"/>
</sposet_builder>
\end{lstlisting}
\section{Forward-Walking Estimators} \label{sec:forward_walking}
Forward walking is a method for sampling the pure fixed-node distribution $\langle \Phi_0 | \Phi_0\rangle$. Specifically, one multiplies each walker's DMC mixed estimate for the observable $\mathcal{O}$, that is, the local value $\mathcal{O}_L(\mathbf{R})=\frac{[\hat{\mathcal{O}}\Psi_T](\mathbf{R})}{\Psi_T(\mathbf{R})}$, by the weighting factor $\frac{\Phi_0(\mathbf{R})}{\Psi_T(\mathbf{R})}$. As it turns out, this weighting factor for any walker $\mathbf{R}$ is proportional to the total number of descendants the walker will have after a sufficiently long projection time $\beta$.
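Schematically (this is a sketch of the resulting accumulation rather than a statement of the implementation details), the forward-walked estimate of $\mathcal{O}$ is
\begin{equation}
\langle \mathcal{O} \rangle_{FW} \approx \frac{\sum_i w_i(\beta)\, \mathcal{O}_L(\mathbf{R}_i)}{\sum_i w_i(\beta)}\:,
\end{equation}
where the sum runs over walkers, $\mathcal{O}_L$ is the local value defined above, and $w_i(\beta)$ is taken proportional to the number of descendants of walker $\mathbf{R}_i$ after projection time $\beta$.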
To forward walk on an observable, declare a generic forward-walking estimator within a \texttt{<hamiltonian>} block, and then specify the observables to forward walk on and the forward-walking parameters. Here is a summary.\\
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{estimator type=ForwardWalking} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{hamiltonian, qmc}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\texttt{Observable}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{type}$^r$ & text & \textbf{ForwardWalking}& & Must be ``ForwardWalking" \\
& \texttt{name}$^r$ & text & \textit{anything} & any & Unique name for estimator \\
\hline
\end{tabularx}
\end{center}
\end{table}
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{Observable} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{estimator, hamiltonian, qmc}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\textit{None}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{name}$^r$ & text & \textit{anything} & any & Registered name of existing estimator on which to forward walk \\
& \texttt{max}$^r$ & integer & $ > 0$ & & Maximum projection time in steps (\texttt{max}$=\beta/\tau$) \\
  & \texttt{frequency}$^r$   & text     & $\geq 1$                & & Dump data to the \texttt{scalar.dat} file only every \texttt{frequency}-th step \\
\hline
\end{tabularx}
\end{center}
\end{table}
Additional information:
\begin{itemize}
\item{\textbf{Cost}: Because histories of observables up to \texttt{max} time steps have to be stored, the memory cost of the non-forward-walked observables should be multiplied by \texttt{max}. Although this is not an issue for scalars such as the potential energy, it could be prohibitive for observables such as densities, forces, etc. }
\item{\textbf{Naming Convention}: Forward-walked observables are automatically named \texttt{FWE\_name\_i}, where \texttt{name} is the name given in the corresponding \texttt{<Observable>} block and \texttt{i} is the projection time step at which the forward-walked expectation value is evaluated. The columns appear under these names in the \texttt{scalar.dat} file. }
\end{itemize}
In the following example case, QMCPACK forward walks on the potential energy for 300 time steps and dumps the forward-walked value at every time step.
\begin{lstlisting}[style=QMCPXML,caption=Forward-walking estimator element.]
<estimator name="fw" type="ForwardWalking">
<Observable name="LocalPotential" max="300" frequency="1"/>
<!--- Additional Observable blocks go here -->
</estimator>
\end{lstlisting}
\section{``Force'' estimators} \label{sec:force_est}
% Force estimators added in CoulombPotentialFactory.cpp, HamiltonianFactory::addForceHam
QMCPACK supports force estimation by use of the Chiesa-Ceperley-Zhang (CCZ) estimator. Currently, open and periodic boundary conditions are supported, but only for all-electron calculations.
Without loss of generality, the CCZ estimator for the z-component of the force on an ion centered at the origin is given by the following expression:
\begin{equation}
F_z = -Z \sum_{i=1}^{N_e}\frac{z_i}{r_i^3}[\theta(r_i-\mathcal{R}) + \theta(\mathcal{R}-r_i)\sum_{\ell=1}^{M}c_\ell r_i^\ell]\:.
\end{equation}
Here, $Z$ is the ionic charge, $M$ is the degree of the smoothing polynomial, $\mathcal{R}$ is the real-space cutoff of the sphere within which the bare-force estimator is smoothed, and $c_\ell$ are predetermined coefficients. These coefficients are chosen to minimize the weighted mean-square error between the bare force estimate and the s-wave filtered estimator. Specifically,
\begin{equation}
\chi^2 = \int_0^\mathcal{R}dr\,r^m\,[f_z(r) - \tilde{f}_z(r)]^2\:.
\end{equation}
Here, $m$ is the weighting exponent, $f_z(r)$ is the unfiltered radial force density for the z force component, and $\tilde{f}_z(r)$ is the smoothed polynomial function for the same force density. The reader is invited to refer to the original paper for a more thorough explanation of the methodology, but with the notation in hand, QMCPACK takes the following parameters.
\FloatBarrier
\begin{table}[h]
\begin{center}
\begin{tabularx}{\textwidth}{l l l l l X }
\hline
\multicolumn{6}{l}{\texttt{estimator type=Force} element} \\
\hline
\multicolumn{2}{l}{parent elements:} & \multicolumn{4}{l}{\texttt{hamiltonian, qmc}}\\
\multicolumn{2}{l}{child elements:} & \multicolumn{4}{l}{\texttt{parameter}}\\
\multicolumn{2}{l}{attributes} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{mode}$^o$ & text & \textit{See above} & bare & Select estimator type\\
& \texttt{lrmethod}$^o$ & text & ewald or srcoul & ewald & Select long-range potential breakup method\\
& \texttt{type}$^r$ & text & Force & & Must be ``Force" \\
& \texttt{name}$^o$ & text & \textit{anything} & ForceBase & Unique name for this estimator\\
% & \texttt{psi}$^o$ & text & \texttt{wavefunction.name}& psi0 & Identify wavefunction\\
& \texttt{pbc}$^o$ & boolean & yes/no & yes & Using periodic BC's or not\\
& \texttt{addionion}$^o$ & boolean & yes/no & no & Add the ion-ion force contribution to output force estimate \\
\multicolumn{2}{l}{parameters} & \multicolumn{4}{l}{}\\
& \bfseries name & \bfseries datatype & \bfseries values & \bfseries default & \bfseries description \\
& \texttt{rcut}$^o$ & real & $> 0$ & 1.0 & Real-space cutoff $\mathcal{R}$ in bohr\\
& \texttt{nbasis}$^o$ & integer & $> 0 $ & 2 & Degree of smoothing polynomial $M$ \\
& \texttt{weightexp}$^o$ & integer &$ > 0$ & 2 & $\chi^2$ weighting exponent $m$\\
\hline
\end{tabularx}
\end{center}
\end{table}
\FloatBarrier
Additional information:
\begin{itemize}
\item{\textbf{Naming Convention}: Force components appear in the \texttt{scalar.dat} file under columns named \texttt{name\_X\_Y}, where \texttt{name} is the unique identifier given above, \texttt{X} is the ion ID number, and \texttt{Y} is the component ID (an integer with x=0, y=1, z=2). All force components for all ions are computed and dumped to the \texttt{scalar.dat} file.}
\item{\textbf{Long-range breakup}: With periodic boundary conditions, it is important to converge the lattice sum when calculating the Coulomb contribution to the forces. As a quick test, increase the \texttt{LR\_dim\_cutoff} parameter until the ion-ion forces are converged. The Ewald method (\texttt{lrmethod}="ewald") converges more slowly than the optimized method (\texttt{lrmethod}="srcoul").}
\item{\textbf{Miscellaneous}: Usually, the default choice of \texttt{weightexp} is sufficient. However, different combinations of \texttt{rcut} and \texttt{nbasis} should be tested to minimize variance and bias. There is, of course, a tradeoff, with larger \texttt{nbasis} and smaller \texttt{rcut} leading to smaller biases and larger variances. }
\end{itemize}
The following is an example use case.
\begin{lstlisting}[style=QMCPXML]
<simulationcell>
...
<parameter name="LR\_dim\_cutoff"> 20 </parameter>
</simulationcell>
<estimator name="myforce" type="Force" mode="cep" addionion="yes" lrmethod="srcoul">
<parameter name="rcut">0.1</parameter>
<parameter name="nbasis">4</parameter>
<parameter name="weightexp">2</parameter>
</estimator>
\end{lstlisting}
| {
"alphanum_fraction": 0.6476418947,
"avg_line_length": 66.2787550744,
"ext": "tex",
"hexsha": "9c570a003086f42daa8dcdc8f400c81c72ae4c2b",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "280f67e638bae280448b47fa618f05b848c530d2",
"max_forks_repo_licenses": [
"NCSA"
],
"max_forks_repo_name": "djstaros/qmcpack",
"max_forks_repo_path": "legacy_manual/hamiltonianobservable.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "280f67e638bae280448b47fa618f05b848c530d2",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"NCSA"
],
"max_issues_repo_name": "djstaros/qmcpack",
"max_issues_repo_path": "legacy_manual/hamiltonianobservable.tex",
"max_line_length": 934,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "280f67e638bae280448b47fa618f05b848c530d2",
"max_stars_repo_licenses": [
"NCSA"
],
"max_stars_repo_name": "djstaros/qmcpack",
"max_stars_repo_path": "legacy_manual/hamiltonianobservable.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 29176,
"size": 97960
} |
%auto-ignore
\providecommand{\MainFolder}{..}
\documentclass[\MainFolder/Text.tex]{subfiles}
\begin{document}
\section{Approximation using heat form}\label{Sec:Hwe}
\allowdisplaybreaks
Given an oriented Riemannian manifold $(M,g)$, consider the \emph{heat form}
\begin{equation}\label{Eq:HK}
\KKer_t(x,y) = \sum_i (-1)^{kn}e^{-\lambda_i t} (\star e_i)(x) \wedge e_i(y),
\end{equation}
where $(e_i)$ are eigenvectors of $\Delta$, $\lambda_i$ the corresponding eigenvalues, and $k$ the degree of the eigenform $e_i$. It is equivalently the Schwartz form of the operator $\exp(-\Delta t)$ (see \cite[Chapter~3]{Harris2004}) or the unique solution of the equation $\Laplace \KKer_t(x,y) = - \frac{\partial}{\partial t}\KKer_t(x,y)$ with $\lim_{t\to 0} \KKer_t = \DR_\Id$, where $\DR_\Id$ is the Schwartz form of the identity (see \cite{Hein2006}).
%
\begin{Proposition}[Properties of the heat kernel]\label{Prop:Heasd}
Let $M$ be an oriented Riemannian manifold. The heat form $\KKer_t(x,y)$ is smooth on $M\times M \times (0,\infty)$ and satisfies
$$ \Dd \KKer_t = 0, \quad \tau^*\KKer_t = (-1)^n\KKer_t\quad\text{and}\quad \frac{1}{2}\Delta \KKer_t = \Diag_x \KKer_t = - \frac{\partial }{\partial t} \KKer_t. $$
\end{Proposition}
\begin{proof}
Straightforward computation and a nice combinatorial argument.
\end{proof}
%
%\begin{proof}
%Well-known.
%%\begin{description}
%%\item[E)]
%%It suffices to prove it for $e^{-d(x,y)^2/t}$ by heat expansion. Quantitively, i.e.\ making no distinction between $x_i$'s, we have
%%$$ \partial^l e^{-r^2/t} = p_l(x,t) e^{-r^2/t}, $$
%%where $p_l = (\frac{x}{t} + \partial_x)p_{l-1}$ with $p_0 = 1$, where
%%$$ \Abs{\underbrace{\frac{\partial }{\partial x_i} d^2(x,y)}_{\sim x}} \le C d(x,y), $$
%%and hence in the estimate we can replace $x$ by $r$. For $t\in (0,1)$ it holds $\frac{1}{t^a} \le \frac{1}{t^b}$ iff $a\le b$. In every term we have only
%%$$ \frac{r^a}{t^b} e^{-\frac{r^2}{t}} = \frac{1}{t^{b-\frac{a}{2}}}(\frac{r^2}{t})^{\frac{a}{2}} e^{-\frac{r^2}{t}} = \frac{1}{t^{b-\frac{a}{2}}} \underbrace{\bigl[ 2^{\frac{a}{2}}\bigl(\frac{r^2}{2t}\bigr)^{\frac{a}{2}} e^{-\frac{r^2}{2t}} \bigr]}_{=:g_a(x), y= \frac{r^2}{2t}} e^{-\frac{r^2}{2t}} $$
%%for $a\le b$. But $g_a(y), y\ge 0$ is a bounded function. We have $p_l$ polynoms in $\frac{r}{t}$. By the recursive formulas we get that the terms $\frac{1}{t^{l/2}}$, resp. $\frac{r}{t^{(l+1)/2}}$ for $l$ even resp. $l$ odd have biggest $b-a/2$ equal to $l/2$. Indeed, both operations $x/t \cdot$ and $\partial_x$ increase $a-b/2$ by maximally $1/2$. We start with $a = 0$, $b=0$ for $l=0$ and we proceed by induction that the maximum of $a-b/2$ is $l/2$ in $p_l$. \qedhere
%%\end{description}
%\end{proof}
%
%We define
%$$ \begin{aligned}
%\GKer_t & = \int_t^\infty \bigl(\KKer_t - \HKer\bigr) \\
%\Prpg_t & = \int_t^\infty \CoDd\KKer_t
%\end{aligned} $$
%and denote $\GKer = \GKer_0$, $\Prpg = \Prpg_0$.
%
%\begin{Lemma}[Integrals dependent on parameter] \label{Lem:IntPar}
%Let $I$, $J\subset \R$ be intervals and $f(a,u) : I \times J \rightarrow \R$. Suppose that:
%\begin{enumerate}
% \item $I$ is open
% \item For all $a\in I$ is $f(a,\cdot)$ measurable in $J$.
% \item For almost all $u\in J$ is $f(\cdot,u)$ differentiable in $I$
% \item There is a $g\in L^1(J)$ such that $\Abs{\frac{\partial}{\partial a} f(a,u)} \le g(u)$ for almost all $u\in J$ and all $a\in I$
% \item There is an $a_0\in I$ such that $f(a_0,\cdot) \in L^1(J)$
%\end{enumerate}
%Then $F(a) \coloneqq \int_J f(a,u) \Diff{u}$ is finite and
%$$ F'(a) = \int_J \frac{\partial }{\partial a} f(a,u) \Diff{u} $$
%\end{Lemma}
\begin{Proposition}[Approximation using heat form]\label{Prop:HeatKerFormulas}
Let $M$ be an oriented Riemannian manifold and $\KKer_t(x,y)$ the heat form.
For all $(t,x,y)\in \bigl([0,\infty)\times M \times M\bigr)\backslash \bigl(\{0\}\times \Diag\bigr) =: D(\KKer)$, define
\begin{equation}\label{Eq:HeatKerApprox}
\begin{aligned}
\GKer_t(x,y) &\coloneqq \int_t^\infty \KKer_\tau(x,y)\Diff{\tau}\quad\text{and}\\
\Prpg_t(x,y) &\coloneqq (-1)^{n+1}\int_t^\infty (\Id\COtimes\CoDd_y) \KKer_\tau(x,y)\Diff{\tau}.
\end{aligned}
\end{equation}
Then:
\begin{ClaimList}
\item The forms $\GKer_t$ and $\Prpg_t$ are smooth on $D(\KKer)$, the point-wise limits $\GKer'$ and $\Prpg'$ as $t\to 0$ exist, and it holds $\GKer_t \darrow[t]\GKer'$ and $\Prpg_t\darrow[t]\Prpg'$ as $t\to 0$ (uniform convergence) in $C^\infty_{\text{loc}}(M\times M\backslash\Diag)$.
\item On $D(\KKer)$, the following relations hold:
\begin{align*}
\Dd \GKer_t &= 0 & \Prpg_t &= (-1)^{n+1}\frac{1}{2} \CoDd\GKer_t \\
\Laplace \GKer_t &= \KKer_t - \HKer & \Dd \Prpg_t &= (-1)^n(\HKer - \KKer_t) \\
\tau^*\GKer_t &=(-1)^n\GKer_t & \tau^* \Prpg_t &= (-1)^n \Prpg_t.
\end{align*}
It follows that $\GKer' = \GKer$ is the (Laplace) Green form and $\Prpg' = \StdPrpg$ the standard Hodge propagator.
\end{ClaimList}
\end{Proposition}
\begin{proof}
The formal computation is clear. An honest proof uses the standard heat kernel estimates.
\ToDo[caption={Say more},noline]{Say more about the proofs!}
%The relations follow from
%$$ \Dd \KKer_t = 0\quad\text{and}\quad (\CoDd_x\COtimes\Id)\KKer_t = (\Id\COtimes\CoDd_y) = \frac{1}{2}\CoDd\KKer_t. $$
%Use properties of the heat kernel,... Also see Harris, Heine,.. We have that $\CoDd = \CoDd_x \COtimes \Id + \Id \COtimes \CoDd_y$, it holds $(\CoDd_x \COtimes \Id)\tau^* = \Id \COtimes \CoDd_y$.
\end{proof}
\begin{Proposition}[$\StdPrpg$ is codifferential of $\GKer$]\label{Prop:StdCodifInt}
Let $M$ be an oriented Riemannian manifold, and let $\GKer\in \DR^n(M\times M\backslash\Diag)$ be the Green form. Then the standard Hodge propagator $\StdPrpg$ satisfies
\begin{equation}\label{Eq:FormForPUsingG}
\StdPrpg(x,y)= (-1)^{n+1}(\Id\otimes \CoDd_y)\GKer(x,y),
\end{equation}
where $\Id\otimes \CoDd_y: \DR^\bullet(M\times M) \rightarrow \DR^{\bullet-1}(M \times M)$ is the differential operator defined in local coordinates by commuting $\CoDd$ over the first factor with the Koszul sign and applying it to the second factor.
\end{Proposition}
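Concretely (a sketch of the sign convention just described, writing $\deg\alpha$ for the form degree of $\alpha$), on a product of forms one has
$$ (\Id\otimes \CoDd_y)\bigl(\alpha(x)\wedge\beta(y)\bigr) = (-1)^{\deg \alpha}\, \alpha(x)\wedge (\CoDd\beta)(y). $$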
\begin{proof}
As for the signs, $(-1)^n$ comes from $\TOp \GOp \omega(y) = (-1)^{nT}\int_x(\Id \otimes \TOp_y)\GKer(x,y)\omega(x)$ with $T=\CoDd$ and $-1$ from $\StdHtp = - \CoDd\GOp$.
The rest can be proven using the heat kernel approximation and standard heat kernel estimates.
There is another method, using the asymptotic expansion of $\GKer$, which was shown to the author by Prof.~Dr.~Christian~Bär.
\ToDo[caption={Say more},noline]{Say more about the proofs!}
\end{proof}
%We have the following.
%
%\begin{Proposition}[Asymptotic expansion of $\KKer_t$ and $\GKer$]
%\end{Proposition}
%
%\begin{itemize}
% \item Asymptotic expansion of
% \item
%\end{itemize}
%
%Does $\GKer$ extend to the blow-up?
%
%Can I use this to show that $\CoDd \GKer$ does too extend to blow-up?
%
%\begin{Remark}[Operators not extending to blow-up]
%Under the change $x=u+r\omega$, $y=u-r\omega$ and $\tilde{f}(r,\omega,u) = f(x,y)$, we compute the following
%\begin{align*}
%\frac{\partial \tilde{f}}{\partial x^i} & = \frac{1}{2} \omega_i \frac{\partial \tilde{f}}{\partial r} + \frac{1}{2r}\sum_{j=1}^{n+1}(\delta_{ij} - \omega_i\omega_j)\frac{\partial\tilde{f}}{\partial \omega^j} + \frac{1}{2}\frac{\partial\tilde{f}}{\partial u^i} \\
%\frac{\partial \tilde{f}}{\partial y^i} & = -\frac{1}{2} \omega_i \frac{\partial \tilde{f}}{\partial r} - \frac{1}{2r}\sum_{j=1}^{n+1}(\delta_{ij} - \omega_i\omega_j)\frac{\partial\tilde{f}}{\partial \omega^j} + \frac{1}{2}\frac{\partial\tilde{f}}{\partial u^i}
%\end{align*}
%Because of the middle $\frac{1}{r}$ these do not always descend. We also compute
%\end{Remark}
\end{document}
| {
"alphanum_fraction": 0.6599763872,
"avg_line_length": 61.9756097561,
"ext": "tex",
"hexsha": "195cfc66da2f851df3ac1ccc1e7c5411c9ef74b7",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0e124466a3d0ff988c012225400fadb0b170aa9e",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "p135246/phd-thesis",
"max_forks_repo_path": "Subfiles/GrKer_GrHeat.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "0e124466a3d0ff988c012225400fadb0b170aa9e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "p135246/phd-thesis",
"max_issues_repo_path": "Subfiles/GrKer_GrHeat.tex",
"max_line_length": 476,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "0e124466a3d0ff988c012225400fadb0b170aa9e",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "p135246/phd-thesis",
"max_stars_repo_path": "Subfiles/GrKer_GrHeat.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2917,
"size": 7623
} |
\documentclass[edeposit,fullpage,12pt]{uiucthesis2009}
% Use draftthesis for notes and date markings on every page. Useful when you
% have multiple copies floating around.
% Use offcenter for the extra .5 inch on the left side. Needed with fullpage and fancy.
% Use mixcasechap for compatibility with hyperref package, which does NOT like all caps default
% Use edeposit for the adviser/committee on the title page.
% Use tocnosub to suppress subsection and lower entries in the TOC.
% PhD candidates use "proquest" for the proquest abstract.
\makeatletter
\usepackage{setspace}
%\usepackage{epsfig} % for figures
\usepackage{graphicx} % another package that works for figures
\usepackage{multirow}
\usepackage{placeins}
\usepackage{caption} % allows center figures caption
\usepackage{booktabs} % nice rules (thick lines) for tables
\usepackage{array}
\usepackage{tabularx}
\usepackage[table]{xcolor}
\newcolumntype{b}{>{\hsize=1.0\hsize}X}
\newcolumntype{s}{>{\hsize=.5\hsize}X}
\newcolumntype{m}{>{\hsize=.75\hsize}X}
\newcolumntype{x}{>{\hsize=.25\hsize}X}
\newcolumntype{L}{>{\raggedright\arraybackslash}X}
\newcolumntype{R}{>{\raggedleft\arraybackslash}X}
\def\arraystretch{1}
\graphicspath{{figures/}}
%\usepackage{subfigure} % for subfigures
\usepackage{amsmath} % for math spacing
%\usepackage{amssymb} % for math spacing
%\usepackage{url} % Hyphenation of URLs.
\usepackage{lscape} % Useful for wide tables or figures.
\captionsetup{justification=raggedright} % makes captions ragged right - thanks to Bryce Lobdell (caption is already loaded above, so use \captionsetup to avoid an option clash)
%\usepackage[font=small,labelfont=bf]{caption}
\usepackage[acronym,toc]{glossaries} % acronyms inclusion
\usepackage{color,soul}
%\makeglossary
\usepackage{xspace}
\usepackage{float}
\usepackage{subcaption}
\newcommand{\Cyclus}{\textsc{Cyclus}\xspace}%
\newcommand{\Cycamore}{\textsc{Cycamore}\xspace}%
\newcommand{\deploy}{\texttt{d3ploy}\xspace}%
%\glspatchtabularx
\usepackage{amsmath}%
\usepackage{MnSymbol}%
\usepackage{wasysym}%
\usepackage{adjustbox}
\usepackage{enumitem}
\usepackage{tkz-euclide}
\usepackage{tikz}
\usetikzlibrary{positioning, arrows, decorations, shapes}
\usetikzlibrary{shapes.geometric,arrows}
\def\checkmark{\tikz\fill[scale=0.4](0,.35) -- (.25,0) -- (1,.7) -- (.25,.15) -- cycle;}
\definecolor{illiniblue}{HTML}{B1C6E2}
\definecolor{illiniorange}{HTML}{f8c2a2}
\definecolor{fhrblue}{HTML}{0000ff}
\definecolor{fhrgrey}{HTML}{808080}
\definecolor{fhrblack}{HTML}{040404}
\definecolor{fhrred}{HTML}{f10a0a}
\definecolor{fhrgreen}{HTML}{2f6d39}
\definecolor{fhryellow}{HTML}{fdfe36}
\definecolor{fhrpink}{HTML}{ffb8c5}
\definecolor{fhrorange}{HTML}{ffa500}
\definecolor{fhrpurple}{HTML}{800080}
\definecolor{pink}{HTML}{e2b1c2}
\definecolor{green}{HTML}{c2e2b1}
\definecolor{purple}{HTML}{b9b1e2}
\tikzstyle{loblock} = [rectangle, draw, fill=illiniorange,
text width=15em, text centered, rounded corners, minimum height=3em]
\tikzstyle{lbblock} = [rectangle, draw, fill=illiniblue,
text width=15em, text centered, rounded corners, minimum height=3em]
\tikzstyle{oblock} = [rectangle, draw, fill=illiniorange,
text width=10em, text centered, rounded corners, minimum height=3em]
\tikzstyle{bblock} = [rectangle, draw, fill=illiniblue,
text width=10em, text centered, rounded corners, minimum height=3em]
\tikzstyle{arrow} = [thick,->,>=stealth]
\tikzstyle{bbblock} = [rectangle, draw, fill=illiniblue,
text width=1em, text centered, rounded corners, minimum height=1em]
\tikzstyle{boblock} = [rectangle, draw, fill=illiniorange,
text width=1em, text centered, rounded corners, minimum height=1em]
\tikzstyle{b72block} = [rectangle, draw, fill=illiniblue,
text width=7.3em, text centered, rounded corners, minimum height=2em]
\tikzstyle{b82block} = [rectangle, draw, fill=illiniblue,
text width=8em, text centered, rounded corners, minimum height=2em]
\tikzstyle{b223block} = [rectangle, draw, fill=illiniblue,
text width=22em, text centered, rounded corners, minimum height=3em]
\usepackage[document]{ragged2e}
\usepackage{booktabs}% http://ctan.org/pkg/booktabs
\newcommand{\tabitem}{~~\llap{\textbullet}~~}
\usepackage{hyperref}
\hypersetup{hidelinks}
\usepackage{minted}
% Uncomment the appropriate one of the following four lines:
%\msthesis
\phdthesis
%\otherdoctorate[abbrev]{Title of Degree}
%\othermasters[abbrev]{Title of Degree}
\title{Fluoride-Salt-Cooled High-Temperature Reactor Design Optimization with Evolutionary Algorithms}
\author{Gwendolyn J.Y. Chee}
\department{Nuclear, Plasma, and Radiological Engineering}
\degreeyear{2021}
% Advisor name is required for
% - doctoral students for the ProQuest abstract
% - master's students who do not have a master's committee
%\advisor{Professor Kathryn D. Huff}
% Uncomment the \committee command for
% - all doctoral students
% - master's students who have a master's committee
\committee{Assistant Professor Kathryn D. Huff, Chair \\
Research Scientist Madicken Munk \\
Associate Professor Tomasz Kozlowski \\
Professor James F. Stubbins \\
Research Assistant Professor Huy Trong Tran}
\begin{document}
\include{acros}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% TITLE
%
\maketitle
\justify
\parindent 2em%
%\frontmatter
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% ABSTRACT
%
\begin{abstract}
\input{abs}
\end{abstract}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% TABLE OF CONTENTS
%
\tableofcontents
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% LIST OF TABLES
%
% The List of Tables is not strictly necessary. Omitting the List of Tables will
% simplify the thesis check and reduce the number of corrections.
%\listoftables
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% LIST OF FIGURES
%
% The List of Figures is not strictly necessary. Omitting the List of Figures will
% simplify the thesis check and reduce the number of corrections.
%\listoffigures
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% LIST OF ABBREVIATIONS
%
% The List of Abbreviations is not strictly necessary.
%\chapter{LIST OF ABBREVIATIONS}
%\printacronyms
%\begin{symbollist*}
%\item[MSBR] Molten Salt Breeder Reactor
%\item[MSR] Molten Salt Reactor
%\item[ORNL] Oak Ridge National Laboratory
%\end{symbollist*}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% LIST OF SYMBOLS
%
%\begin{symbollist}[0.7in]
%\item[$\tau$] Time taken to drink one cup of coffee.
%\end{symbollist}
\mainmatter
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% INSERT REAL CONTENT HERE
%
\include{intro}
\include{lit-review}
\include{fhr-benchmark}
\include{rollo}
\include{rollo-demo}
\include{proposal}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% APPENDIX
%
%\appendix
%\include{apx}
\backmatter
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% BIBLIOGRAPHY
%
\bibliographystyle{plainurl}
\bibliography{2021-chee-prelim}
\end{document}
| {
"alphanum_fraction": 0.6764377443,
"avg_line_length": 34.7766990291,
"ext": "tex",
"hexsha": "e34402e883fbd16663ccd3cbc35e7516bf30c364",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "e28fae5f64ab4a4464d73b4cc42cb5c767754e5a",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "gwenchee/2021-chee-prelim",
"max_forks_repo_path": "docs/2021-chee-prelim.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e28fae5f64ab4a4464d73b4cc42cb5c767754e5a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "gwenchee/2021-chee-prelim",
"max_issues_repo_path": "docs/2021-chee-prelim.tex",
"max_line_length": 103,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "e28fae5f64ab4a4464d73b4cc42cb5c767754e5a",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "gwenchee/2021-chee-prelim",
"max_stars_repo_path": "docs/2021-chee-prelim.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2056,
"size": 7164
} |
\documentclass{article}
\usepackage{verbatim}
\title{Introduction to Ecru}
\author{Roman Bogorodskiy ([email protected])}
\date{}
\begin{document}
\maketitle
\tableofcontents
\section{About}
Ecru is a command-line LiveJournal client which pretends to be flexible
and follow Unix way.
You might consult README file in the Ecru distribution in order to get
information about dependencies and build process.
\section{Getting started}
\subsection{Configuration}
Ecru comes with a tool, {\tt ecru-config}, which is responsible for
configuration-related issues. To generate the initial configuration schema,
execute {\tt ecru-config} with the {\tt -g} argument: {\tt ecru-config -g}. It
will prompt for your LiveJournal username and the MD5 hash of your password.
On Linux systems you can generate the MD5 hash of your password using the
{\tt md5sum} utility, and on FreeBSD you might use {\tt md5 -s}. So, in order
to avoid interactivity, you might do the following:
\begin{verbatim}
ecru-config -g -u username -p `md5 -s mypass`
\end{verbatim}
Consider that it might be insecure to leave your plain text password in the
shell history.
A new directory {\tt ecru.new} will be created by {\tt ecru-config}. Just
{\tt cp -r } it to {\tt \verb+~+/.ecru} and you're done with it.
\subsection{Posting}
A tool called {\tt ecru-post} is responsible for posting new entries. It can be used
either in interactive or command-line mode. Let's start with the interactive mode.
If you type {\tt ecru-post} without any arguments, it will load {\tt \$EDITOR}. As soon as you
finish typing, save the file, and quit the editor, the post will be submitted and
{\tt ecru-post} will return the URL of the new post.
To add a subject to the post, you should add a 'subject' keyword at the top of the text, like:
\begin{verbatim}
subject: hey, this is a subject
the text of a post goes here
one more line just to make it look more real
\end{verbatim}
You might add other keywords like current mood and stuff, for example:
\begin{verbatim}
subject: another post
current_mood: good
current_music: The Beatles - Let It Be
Some very kewlz post, YA RLY.
\end{verbatim}
Please note the blank line separating the header part and the body; it is required. It can be omitted
only if your post has no headers at all.
As mentioned before, it is possible to post in non-interactive mode. It can be done this way:
\begin{verbatim}
echo "body of the post" | ecru-post -f - \
-s "subject of the post" \
-Dcurrent_mood=good -Dcurrent_music="`music.sh`"
\end{verbatim}
It will post a new entry without invoking the editor. As you might have noticed, {\tt -s} is used to set the subject
and {\tt -D} is used to set properties.
Note that you can use {\tt -s} and {\tt -D} in interactive mode as well; however, command line arguments have
lower priority than the keywords defined in the text. E.g., if you execute {\tt ecru-post -s "some subject"} and
do not provide a 'subject:' keyword in the text, the subject of your post will be "some subject". However, if
you execute {\tt ecru-post -s "some subject"} and add a "subject:" keyword to the text, like "subject: cooler
subject", the subject of your post will be "cooler subject". The same holds for {\tt -D} as well.
\subsection{About security property}
Property "security" has three possible values: "public" (default), "friendsonly" and "private". So in order
to make a post visible only to friends, "security" property should be set to "friendsonly", i.e. a header
"security: friendsonly" should be added or {\tt -Dsecurity=friendsonly} option passed.
\subsection{Templates}
Before {\tt ecru-post} invokes the editor, it loads the contents of the {\tt \verb+~+/.ecru/templates/default} file.
That's where the 'subject:' line comes from with the default configuration. You might alter the
{\tt \verb+~+/.ecru/templates/default} template for your needs. You might also create new templates, place them into the
{\tt \verb+~+/.ecru/template/} directory, and pass their names to the {\tt -t} argument of {\tt ecru-post}. For example,
if you created {\tt \verb+~+/.ecru/template/mytemplate}, you would call {\tt ecru-post -t mytemplate} to use it.
\subsection{Post operations: list, edit, delete}
The {\tt ecru-list} tool can be used to list recent posts. In the first column it shows the item id (important!),
then the first few characters of the post body and the post time. If you pass the {\tt -s} argument, it will show the post URL
as well.
You can delete posts using {\tt ecru-delete}; e.g., if you want to delete the posts with ids 10 and 11, you
do {\tt ecru-delete 10 11}. As a reminder: you can look up an id in the first column of the {\tt ecru-list}
output.
To edit the post with id 10, execute {\tt ecru-edit 10}.
To obtain info about the post with id 10, execute {\tt ecru-info 10}. By the way, there is a special
id "-1" which always refers to the latest post in your journal. For example, {\tt ecru-info -- -1} will
show info about the latest post. Note the "--": it is used so that getopt does not treat "-1" as an option.
\section{Advanced topics}
\subsection{Configuration profiles}
Ecru supports having several configuration profiles (which could be useful if you have several accounts). If you want to
add a new profile, just create a new configuration file in {\tt \verb+~+/.ecru/}. Its name should end with
{\tt .conf}.
Now if you run {\tt ecru-config -l}, you should see a list of configurations; in our example it should be
'default.conf', marked with an asterisk, and the configuration file you just created. The asterisk (*) marks the
currently active configuration profile. To change the current configuration file, do the following (assuming
you named the file example.conf):
\begin{verbatim}
ecru-config -s example.conf
\end{verbatim}
Now {\tt ecru-config -l} should show example.conf marked with an asterisk.
\subsection{Hooks}
\subsubsection{Introduction}
Ecru supports pre- and post-edit hooks. What does that mean? Before and after invoking the editor,
ecru checks for executable files in {\tt \verb+~+/.ecru/hooks/pre} (pre-hooks) and
{\tt \verb+~+/.ecru/hooks/post} (post-hooks) and executes them, passing the filename of your
post as the first argument. So you can do whatever you want with it. Some typical usages as
I see them: replacing stuff like 'current\_music: `get\_my\_music`' with the execution result of
the {\tt get\_my\_music} app (check the {\tt hooks} directory of the ecru distribution to find
a script that does that); introducing various types of macros; etc.
\subsubsection{Writing hooks in Python}
Hooks for ecru can be implemented in various languages, but {\tt Python} seems to be a good choice.
If you're writing a script in Python, you might want to use some helper routines. There's a
{\tt python} directory in the top-level ecru source directory; it contains a helper module for
implementing hooks in Python. To install it, execute:
\begin{verbatim}
sudo python setup.py install
\end{verbatim}
in the {\tt python} directory. You can see a usage example by viewing the {\tt hooks/00\_markdown.py} script,
which implements Markdown support.
\section{VIM configuration}
If you're using vim, the following tips might be useful to you. Add the following to your {\tt .vimrc}:
\begin{verbatim}
autocmd BufRead /tmp/ecru* call EcruStuff()
function EcruStuff()
set wrap linebreak textwidth=70
set ft=html
set spell
set backup " ecru is not stable yet ;]
endfunction
\end{verbatim}
The first line arranges for the {\tt EcruStuff()} function to be called whenever the filename matches the {\tt /tmp/ecru*} pattern.
The {\tt EcruStuff} function sets the line length to 70 (which makes reading easier) and sets the file type to
html to get HTML syntax highlighting in posts.
\end{document}
| {
"alphanum_fraction": 0.7260584677,
"avg_line_length": 49.9119496855,
"ext": "tex",
"hexsha": "7fab3a3ea8a99daf6d4c8a2110c26242b57f7e79",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-07-13T06:57:41.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-07-13T06:57:41.000Z",
"max_forks_repo_head_hexsha": "b798d5a0cfa481af5af3974a950558dcf1dd37dc",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "novel/ecru",
"max_forks_repo_path": "doc/ecru-introduction.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b798d5a0cfa481af5af3974a950558dcf1dd37dc",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "novel/ecru",
"max_issues_repo_path": "doc/ecru-introduction.tex",
"max_line_length": 117,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "b798d5a0cfa481af5af3974a950558dcf1dd37dc",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "novel/ecru",
"max_stars_repo_path": "doc/ecru-introduction.tex",
"max_stars_repo_stars_event_max_datetime": "2016-05-08T13:29:29.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-05-08T13:29:29.000Z",
"num_tokens": 2093,
"size": 7936
} |
% !TEX TS-program = lualatex
\documentclass[10pt, stock]{memoir}
\usepackage{fontspec}
\setmainfont{MyriadPro}
\usepackage{fapapersize}
\usefapapersize{148mm,210mm,15mm,15mm,20mm,15mm}
\usepackage{afterpage}
\usepackage{hyperref}
\usepackage{graphicx}
\usepackage{xcolor}
\usepackage{ltablex}
\usepackage{parskip}
\frenchspacing
\setlength{\parindent}{0pt}
\setlength{\parskip}{10pt}
\setsecnumdepth{subsection}
%% Idioms %%
\hyphenation{Com-put-er-Craft}
\hyphenation{O-pen-Com-put-ers}
\hyphenation{ROM-BASIC}
\definecolor{black}{HTML}{000000}
\definecolor{white}{HTML}{FFFFFF}
\definecolor{dimgrey}{HTML}{555555}
\definecolor{brightgrey}{HTML}{AAAAAA}
\definecolor{yellow}{HTML}{FFFF00}
\definecolor{orange}{HTML}{FF6600}
\definecolor{red}{HTML}{DD0000}
\definecolor{magenta}{HTML}{FF0099}
\definecolor{purple}{HTML}{330099}
\definecolor{blue}{HTML}{0000CC}
\definecolor{cyan}{HTML}{0099FF}
\definecolor{lime}{HTML}{55FF00}
\definecolor{green}{HTML}{00AA00}
\definecolor{darkgreen}{HTML}{006600}
\definecolor{brown}{HTML}{663300}
\definecolor{tan}{HTML}{996633}
\newcommand{\unemph}[1]{\textcolor{brightgrey}{#1}}
% Title styling
\pretitle{\begin{flushright}\HUGE}
\posttitle{\par\end{flushright}\vskip 0.5em}
% new chapters start on a new page
\let\oldsection\chapter
\renewcommand\chapter{\clearpage\oldsection}
% chapter title -- no new page after
%\renewcommand\chapterheadstart{} % kill the drop
\renewcommand\afterchapternum{\vskip 0.5em} % space between number and title
\makeatletter
\renewcommand\memendofchapterhook{%
\newpage\m@mindentafterchapter\@afterheading}
\makeatother
\renewcommand{\thefootnote}{\fnsymbol{footnote}}
\aliaspagestyle{part}{empty}
\aliaspagestyle{chapter}{empty}
% The title
\title{\textbf{THE MANPAGE} \\ \vspace{7mm} \large For the Game \emph{Terrarum}\quad ·\quad First Edition}
\date{}
\author{}
\hypersetup{
pdfauthor={Terrarum Developers},
pdftitle={THE MANPAGE},
unicode=true
}
\begin{document}
\begin{titlingpage}
\maketitle{}
\end{titlingpage}
\setcounter{page}{3}
\tableofcontents*
\chapter{Main Applications}
\section{DSH}
\input{man_dsh}
\section{MSH}
\input{man_msh}
\chapter{References}
\afterpage{\pagestyle{empty}\null\newpage}
\end{document} | {
"alphanum_fraction": 0.7596239928,
"avg_line_length": 18.7731092437,
"ext": "tex",
"hexsha": "ab58fe38a7a0576b7ecb6c4b33fb563df179f304",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8502b513963c225c8f8a4a0f1a61c25b5b20125c",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "curioustorvald/Terrarum",
"max_forks_repo_path": "work_files/usermanuals/themanpage/themanpage.tex",
"max_issues_count": 31,
"max_issues_repo_head_hexsha": "5781f5cd00672fa7a624849f3e5d2b0cf206666f",
"max_issues_repo_issues_event_max_datetime": "2019-12-04T06:44:39.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-03-04T18:02:13.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "minjaesong/Terrarum",
"max_issues_repo_path": "work_files/usermanuals/themanpage/themanpage.tex",
"max_line_length": 106,
"max_stars_count": 4,
"max_stars_repo_head_hexsha": "6697f2f5cd77e2fb108ecdab4141b9d23086a4fa",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "curioustorvald/Terrarum",
"max_stars_repo_path": "work_files/usermanuals/themanpage/themanpage.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-22T08:57:43.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-01-13T10:00:11.000Z",
"num_tokens": 747,
"size": 2234
} |
% Created 2022-03-31 Thu 11:49
% Intended LaTeX compiler: pdflatex
\documentclass[a4paper]{article}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{graphicx}
\usepackage{longtable}
\usepackage{wrapfig}
\usepackage{rotating}
\usepackage[normalem]{ulem}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{capt-of}
\usepackage{hyperref}
\author{Nathaniel Dearce}
\date{\today}
\title{Cv}
\hypersetup{
pdfauthor={Nathaniel Dearce},
pdftitle={Cv},
pdfkeywords={},
pdfsubject={},
pdfcreator={Emacs 27.2 (Org mode 9.6)},
pdflang={English}}
\begin{document}
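% NOTE: the CV macros used below (\name, \personalinfo, \homepage, \email,
% \phone, \location, \github, \makecvheader) are not defined by the article
% class loaded above; they are assumed to come from a CV class or package
% such as altacv.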
\name{Nathan Dearce}
\personalinfo{%
  \homepage{www.cs.odu.edu/~cs_ndear001}
  \email{ndear001.odu.edu}
  \phone{+757 389 3922}
  \location{Norfolk, VA, USA}
  \github{ndbdrc}
}
\makecvheader
\section{Education}
\label{sec:org53708e2}
\end{document}
| {
"alphanum_fraction": 0.7350332594,
"avg_line_length": 22,
"ext": "tex",
"hexsha": "cc034ad763510fb568f207a8dbdb6d36f354a47d",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a3edd1a6b5acd641459061fc200c081e623c79ff",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "nbdrc/nbdrc.github.io",
"max_forks_repo_path": "assets/pdf/cv.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a3edd1a6b5acd641459061fc200c081e623c79ff",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "nbdrc/nbdrc.github.io",
"max_issues_repo_path": "assets/pdf/cv.tex",
"max_line_length": 58,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a3edd1a6b5acd641459061fc200c081e623c79ff",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "nbdrc/nbdrc.github.io",
"max_stars_repo_path": "assets/pdf/cv.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 318,
"size": 902
} |
\documentclass[output=paper]{langscibook}
\ChapterDOI{10.5281/zenodo.5524292}
\author{David Pesetsky\orcid{0000-0003-1530-9230}\affiliation{Massachusetts Institute of Technology}}
\title{Tales of an unambitious reverse engineer}
\abstract{This paper suggests a non-standard explanation for the limited range of semantics available to constructions in which certain elements of a normal finite TP are phonologically absent. These include English \textsc{aux}-drop questions (\citealt{fitzpatrick2006deletion}) and infinitival clauses (\citealt{Wurmbrand:2014aa}), where the proposal suggests an answer to some particularly vexing questions arising from the derivational (“exfoliation”) theory of infinitivization that I have advanced elsewhere (\citealt{Pesetsky:2019aa}). The core idea attributes apparent restrictions on the constructions themselves to restrictions on a hearer’s creativity in positing possible identities for material deleted in the speaker’s derivation (with “hearer” understood as an abstract concept, including self-monitoring by the speaker). Specifically, the hearer may consider only the minimally semantic contentful possibilities compatible with the morphosyntactic environment, in obedience to a \emph{principle of unambitious reverse engineering} (PURE).}
\begin{document}
\SetupAffiliations{mark style=none}
\maketitle
\section{Introduction}
Every time I hear a talk by Susi Wurmbrand, discuss syntax with her, or read one of her papers, my view of the world changes. Not only do I learn about new discoveries and novel approaches to complex problems, I myself am inspired to think new thoughts and explore new topics I never thought of exploring. She is one of the great linguists of our time, and it is our privilege to be her contemporaries. She has the important gift of spotting the inner simplicity in ridiculously tangled puzzles, and the equally important gift of discovering new puzzles for all of us, thus making our intellectual lives simultaneously easier and harder (just as a great colleague should). It is my personal good fortune that the two of us share several research interests -- most notably \textit{finiteness} -- which means that I have been especially able to benefit from these gifts of hers.
By an extraordinary coincidence, the invitation to contribute to this volume arrived shortly after I had prepared several of Wurmbrand's recent papers on non-finite complementation for a graduate seminar that I was co-teaching with my colleague Athulya Aravind, and was contemplating writing a paper in direct response to one of these papers. This portion of the seminar was of particular importance to me as the author of recent work (\citealt{Pesetsky:2019aa}) that argued for a crucially \textit{derivational} theory of finiteness. In this work, I revived the idea originated by \citet{Lees1963} and Rosenbaum \citeyearpar{Rosenbaum:1965aa,Rosenbaum1967}, that non-finite clauses are derived from full and finite clauses in the course of the syntactic derivation, as a response to cross-clausal processes such as raising and control. Could this proposal of mine be reconciled (I wondered) with Wurmbrand's compelling findings about such clauses, which presupposed the opposite view: that non-finite clauses are non-finite from the outset, and have properties (most notably, tenselessness) that distinguish them throughout the derivation from their finite counterparts?
A centerpiece of that class was \citet{Wurmbrand:2014aa}, a brilliant paper that explores the full range of English non-finite complement clauses formed with \textit{to}, and makes a strong case for their deep \textit{tenselessness}. As I prepared that work for class, however, it seemed to me that there might be a way to retain all the logical threads that Wurmbrand traced from construction to construction in support of her central claim, while reaching a very different conclusion.\footnote{As noted below, it turned out that this was not the first time this realization had dawned on me, at least for one class of constructions discussed in Wurmbrand's paper.} The origins of the paper that you are reading now can be traced directly to that class and the fruitful discussions it engendered.
In her paper, Wurmbrand argued for the tenselessness of English infinitival complements by showing that no matter what element one might propose as the finite counterpart of infinitival tense, the infinitival version differs in its behavior in multiple respects, in ways explainable as consequences of tenselessness. I will suggest that a different conclusion is at least equally plausible: that the semantics of tense in the English infinitives studied by Wurmbrand fails to correspond to any single finite counterpart because it actually ranges in principle across the full gamut of possible finite counterparts (as expected if infinitives are derived from finite clauses), behaving in some cases like a present or past modal, in other cases like non-modal present or past tense, and in still other cases (closely following Wurmbrand's discussion) copying its tense value from the embedding verb.\thispagestyle{empty}
Of course, it is also well-known that the semantics of English infinitival clauses is much more restricted in its possibilities than is the semantics of finite clauses. So while it might be the case that the semantics of infinitival tense does range \textit{in principle} across the full gamut of finite possibilities, in reality, the actual possibilities are tightly constrained. While Wurmbrand argues that these constraints reflect deep tenselessness, I will argue that they are actually extra-grammatical, reflecting strong limitations on the ability of a hearer to “reverse engineer” the derivation behind a speaker's utterance, precisely when obligatory elements such as tense have been generated but not phonologically interpreted. This explanation for the limitations on the interpretation of infinitival complements, I will suggest, dovetails with an explanation for the properties of a seemingly different puzzle studied by \citet{fitzpatrick2006deletion}, for which he proposed an ingenious but ultimately self-contradictory solution that the approach suggested here resolves. I will also briefly suggest that this “reverse engineering” mode of explanation for the phenomena charted by Wurmbrand suggests new approaches to other phenomena as well.
The data discussed in this paper are almost entirely drawn from \citet{fitzpatrick2006deletion}, \citet{Pesetsky:2019aa}, and \citet{Wurmbrand:2014aa}. This is thus a “new perspectives” paper, and not a “new empirical discoveries” paper. It is the dovetailing with Fitzpatrick's puzzles and the proposals that I argued for in \citet{Pesetsky:2019aa} that may argue for my perspective over Wurmbrand's, not (at least for now) new empirical predictions of the new approach. As I noted at the outset, Wurmbrand's work does not only teach, it also inspires. I am delighted to offer this paper as a modest but characteristic example.
\section{The factative effect in English \textsc{aux-}drop}
\subsection{Fitzpatrick's discoveries}
\citet{fitzpatrick2006deletion} studied a type of yes/no question, common in spoken Standard English, in which the auxiliary verb that is moved to C is omitted. He called this the \textit{\textsc{aux}-drop} construction:
\needspace{5\baselineskip}
\pex<auxdrop>
\ptxt{\textsc{aux}-drop (English)}
\a Anybody want a hot dog? (= \emph{Does anybody want a hot dog?})
\a Anyone seen John today? (= \emph{Has anyone seen John today?})
\a Anybody going to the game? (= \emph{Is anybody going to the game?})
\trailingcitation{(\citealt[400, ex. 1]{fitzpatrick2006deletion})}
\a Anybody accused of a crime today? (= \emph{Was anybody accused of a crime today?})
\xe
\noindent Fitzpatrick provides several arguments that an \textsc{aux}-drop clause is generated as a full interrogative CP in which an auxiliary verb moves from T to C, just as it does in the more formal non-\textsc{aux-}drop counterparts given in parentheses above. Fitzpatrick shows first that \textsc{aux}-drop questions are at least as large as TP, by noting that they may contain negation (\textit{Anybody not like John?}) and higher adverbs (\textit{Everyone probably coming tomorrow?}). A subject pronoun in an \textsc{aux}-drop question must be nominative (\textit{He here yet?}\slash\relax*\textit{Him here yet}), as expected if the pronoun is a normal subject in a finite T (despite the absence of any overt finite auxiliary verb or exponent of T).
Fitzpatrick then proceeds to show that an \textsc{aux}-drop question is even larger than TP, and in particular, that it is generated as a full CP, in which an auxiliary verb in T has moved to C. As he notes first, if the interpretation of examples like (\getref{auxdrop}a--d) as yes/no questions relies on semantics crucially provided by an interrogative C (as in most accounts of such questions), an \textsc{aux-}drop clause must have been generated as a CP. An additional argument rests on the observation that licensing a negative polarity item like \textit{anyone} and \textit{anybody} in examples (\getref{auxdrop}a--d) correlates with movement of \textsc{aux} to C in less controversial matrix yes/no questions. For example, \textit{how come} differs from its near-synonym \textit{why} in not triggering \textsc{aux}-to-C movement, and also differs in not licensing NPIs:
\pex<howcome>
\ptxt{\textit{Why} vs. \textit{how come} as NPI licenser}
\a Why did you ever give anyone that?
\a \ljudge{*}How come you ever gave anyone that?
\trailingcitation{(\citealt[409, adapted from ex. 21]{fitzpatrick2006deletion})}
\xe
\noindent Likewise, though \textit{You gave him that?} with a rising intonation but no movement of \textsc{aux} to C may be understood as a question requiring a \textit{yes} or \textit{no} answer (perhaps reflecting a variety of interrogative C that fails to trigger \textsc{aux-}to-C movement), here too an NPI is not licensed:
\pex<auxinsitu>
\ptxt{\textsc{aux}-to-C vs. its absence correlates with NPI licensing}
\a Did you ever give anyone that?
\a \ljudge{*}You ever gave anyone that?
\trailingcitation{(\citealt[409, adapted from ex. 20d--e]{fitzpatrick2006deletion})}
\xe
\noindent In addition, both \textsc{aux}-drop and non-\textsc{aux}-drop yes/no questions show the effects of what is arguably an adjacency requirement preventing an adverb from intervening between the subject and the auxiliary verb that has moved to C, as (\getref{nowadjacency}\getref{nowadjacency.auxadjacency}--\getref{nowadjacency.auxdropadjacency}) show. This requirement (perhaps a case filter effect) is not found when \textsc{aux-}to-C movement has not taken place, as the embedded question in (\getfullref{nowadjacency.noadjacency}) shows:\footnote{Fitzpatrick does not ground either the correlation between NPI licensing and \textsc{aux}-to-C movement or the \textsc{aux-}subject adjacency condition in deeper principles, with the result that these arguments might weaken as we learn more about these effects. Note that an embedded yes/no question introduced by \textit{whether} or \textit{if} does license a subject NPI, despite the absence of \textsc{aux}-to-C, but at the same time permits an adverb to intervene between C and the subject (\textit{I wonder whether/if now everyone is aware of the problem}), a difference between the two diagnostics that will need an explanation.}
\pex<nowadjacency>
\ptxt{\Aux{}-subject adjacency condition with and without \Aux{}-drop}
\a<auxadjacency> \ljudge{*}Is now everyone aware of the problem?
\a<auxdropadjacency> \ljudge{*}Now everyone aware of the problem?
\a<noadjacency> I wonder whether (now) everyone (now) is aware of the problem.
\trailingcitation{(\citealt[408, adapted from ex. 18--19]{fitzpatrick2006deletion})}
\xe
\noindent Constructions that arguably require semantic parallelism with a full CP provide additional arguments (not from Fitzpatrick). For example, an \textsc{aux-}drop question may be disjoined with a negative alternative in which \textsc{aux} has moved to C, as (\getref{disjunction}) shows, and may provide an antecedent for null complement anaphora where the non-null version would be a full-CP \textit{whether} question:
\pex<disjunction>
\ptxt{Disjunction with full CP alternative}
You give your talk or didn't you?
\xe
\pex[aboveexskip=0pt]<NCA>
\ptxt{Antecedent for null complement anaphora of a \textit{whether} question}
\a You giving your talk tomorrow, or don't you know yet Δ?
\a A: Anyone want coffee? \\B: I'll find out Δ.
\xe
\noindent If these arguments are correct, \textsc{aux}-drop questions are generated as normal interrogative CPs in which the auxiliary verb moves to C, but some property of the grammar permits the auxiliary verb in C to remain unpronounced.
Fitzpatrick makes several further crucial observations. First, he observes that an auxiliary may be silenced by \textsc{aux}-drop only if it has raised to C. A declarative clause like \textit{It has given me a headache}, for example, cannot lose its auxiliary: *\textit{It given me a headache}. The same is true of an embedded yes/no question in which the auxiliary remains in situ: *\textit{I wonder whether Mary written a letter}. Furthermore, \textsc{aux}-drop is limited to root clauses. The examples in (\getref{rootonly}) below also show that movement of \textsc{aux} to C in the antecedent of a counterfactual conditional, an embedded environment, does not have a variant with \textsc{aux}-drop. The additional examples in (\getref{rootonly2}) show that when \textsc{aux} moves to C in an embedded yes/no question, which is possible in informal spoken English (McCloskey \citeyear{McCloskey:2006}), \textsc{aux}-drop is also impossible.\footnote{The binding of \textit{she} by \textit{each actress} in this pair helps ensure that the yes/no question is normally embedded, and is not a quotation (cf. \citealt{fitzpatrick2006deletion} p. 420 fn. 24).}
\pex<rootonly>
\ptxt{\Aux{}-drop only at the root: counterfactual inversion}
\a Had you written a better speech, Sue would probably have won the election.
\a \ljudge{*}You written a better speech, Sue would probably have won the election.
\trailingcitation{(\citealt[409, adapted from ex. 20d--e]{fitzpatrick2006deletion})}
\xe
\pex[aboveexskip=0pt]<rootonly2>
\ptxt{\Aux{}-drop only at the root: T-to-C movement in embedded questions}
\a Each actress wanted to know had she been chosen for the part or not.
\a \ljudge{*}Each actress wanted to know she been chosen for the part or not.
\xe
Fitzpatrick concludes that what yields the \textsc{aux}-drop construction is the \textit{optional non-interpretation by the phonology of the top layer of a fully built syntactic structure}. Though information about the entire structure dominated by the highest TP gets sent to the phonology during the course of the derivation, the contents of the root CP are not. This option makes sense, as Fitzpatrick notes, in the model of phase-by-phase interpretation proposed by \citet{Chomsky2001}, according to which the merging of each phase head triggers phonological interpretation of the phase head's \textit{complement}, as well as semantic interpretation of the complement, to which I return below.\largerpage
\begin{sloppypar}
The crucial case for the \textsc{aux}-drop construction concerns the merger of C, which triggers interpretation of its complement TP. Chomsky's \citeyearpar{Chomsky2001} regime for phase-by-phase interpretation of syntactic structure works hand in hand with the additional hypothesis that constituents interpreted by this process are impenetrable for the remainder of the syntactic derivation. This impenetrability hypothesis both permits and requires apparent instances of extraction from phasal constituents to proceed through the phase edge, merging as a specifier of each phasal head on the path to its final destination.
\end{sloppypar}
As Fitzpatrick notes, however, this proposal comes at a cost: the theory must posit a special clean-up rule to interpret the final CP layer at the root. This fact lies at the heart of Fitzpatrick's account of \textsc{aux-}drop: namely, “that this extra operation need not apply in all cases, and that \textsc{aux}-drop is one case where it fails to apply.” This proposal, however, raises serious questions concerning semantic interpretation. These questions embroil Fitzpatrick's account in a contradiction for which this paper suggests a new resolution. It is this resolution, in turn, that will serve as a model for a reinterpretation of Wurmbrand's \citeyearpar{Wurmbrand:2014aa} findings concerning the tense properties of English infinitives.
Fitzpatrick discovered a remarkable semantic property of \textsc{aux}-drop that makes immediate sense if phonological and semantic interpretation apply together as a unit (Chomsky's \citeyear{Chomsky2001a} rule of ``\textsc{transfer}''). In particular, \textsc{aux}-drop sentences show a phenomenon made famous by Déchaine \citeyearpar{Dechaine1991}, which she called the \textit{factative effect} (adapting terminology from Africanist linguistics credited to Welmers \citeyear{Welmers:1973aa}, 346). In some languages, a clause may lack overt tense marking but nonetheless be interpreted as tensed. Crucially, how tense is understood in such clauses depends on whether its main predicate is eventive (episodic) or non-eventive (e.g. stative). If the predicate is non-eventive, the tense is understood as \textsc{present}, but if it is eventive, the tense may or must be understood as \textsc{past}. The contrast in (\getref{HaitianFactative}) demonstrates the factative effect in Haitian declarative clauses. Fitzpatrick cites additional examples from Fɔ̀ngbè, Yoruba, and Igbo. Example (\getref{AuxDropFactative}) illustrates Fitzpatrick's key discovery: a similar effect at work in the English \textsc{aux}-drop construction:
\NumTabs{4}
\pex<HaitianFactative>
\ptxt{Factative effect: Haitian Kreyòl}
\a
\begingl
\gla Pyè vann bèf yo. {\nogloss{\tabto{2.2in} \textit{eventive→past}}}//
\glb Pyè sell cattle \Det{}//
\glft `Pyè sold the cattle.'//
\endgl
\a
\begingl
\gla Sisi renmen chat mwen. {\nogloss{\tabto{2.2in} \textit{non--eventive→present}}}//
\glb Sisi like cat my//
\glft `Sisi likes my cat.'//
\endgl
\xe
\pex[aboveexskip=0pt]
<AuxDropFactative>
\ptxt{Factative effect: English \Aux{}-drop}
\a
\begingl
\gla You sell that cattle? {\nogloss{\tabto{2.2in} \textit{eventive→past}}}//
\glft `Did you sell that cattle?'//
\endgl
\a
\begingl
\gla You like my cat? {\nogloss{\tabto{2.2in} \textit{non--eventive→present}}}//
\glft `Do you like my cat?'\trailingcitation{(\citealt[414, ex. 27a--d]{fitzpatrick2006deletion})}//
\endgl
\xe
\noindent Following Déchaine \citeyearpar{Dechaine1991}, Fitzpatrick suggested that the factative effect arises when no tense features are available to semantic interpretation. For languages like Haitian that show the factative effect in simple declarative clauses, Fitzpatrick posited a semantically underspecified but syntactically present T in sentences like (\getref{HaitianFactative}a--b). By contrast, the English \textsc{aux-}drop construction involves a fully-specified T that moves to C as part of the auxiliary verb, by the normal process that builds matrix non-\textsc{aux}-drop questions. Fitzpatrick also proposed the following: just as the phonological properties of the tensed auxiliary in C are not submitted to phonological interpretation in the \textsc{aux-}drop constructions, its semantic properties are also not submitted to semantic interpretation. As far as the semantics is concerned, therefore, the tense specification of T might as well never have been generated in the first place (even though it was). Because the contents of C are not interpreted, an \textsc{aux}-drop sentence is understood as if it entirely lacked tense, yielding the same factative effect found in Haitian when T actually does entirely lack a tense specification.
\subsection{Reverse-engineering an \textsc{aux-}drop derivation}\label{subsec:Reverse-engineering-an-aux-drop}\largerpage
As mentioned above, Fitzpatrick's proposal ends up enmeshed in an apparent contradiction concerning the semantic interpretation of the silent material in the \textsc{aux-}drop construction. The remarkable cross-linguistic correlation highlighted by Fitzpatrick between the factative effect and the absence of overt tense does indeed suggest that the phonological suppression of a tensed auxiliary has semantic repercussions. For English \textsc{aux}-drop, however, the claim that these repercussions arise from total non-interpretation of the root CP contradicts some of the very evidence that argued that an entire CP had been built in the first place. That evidence had a crucial semantic component: normal yes/no question semantics correlating with T-to-C movement, correlating in turn with NPI licensing. Fitzpatrick noted this problem (p. 422), but left it unresolved. He also noted an additional related puzzle (p. 419): though the failure to submit the root CP layer to semantic interpretation might entail the lack of access to tense information that had been lodged in C thanks to T-to-C movement -- given the copy theory of movement -- that information should still be present on T in its original position (rendering the factative effect especially mysterious, as Carlo Geraci, personal communication, notes). Since reconstruction phenomena in other domains teach us that unpronounced earlier positions of moved elements may be semantically interpreted, it is puzzling that T-to-C movement followed by \textsc{aux-}drop should be an exception.\footnote{\citet{fitzpatrick2006deletion} offered a tentative conjecture that the components of meaning relevant to question interpretation and NPI licensing might actually be contributed by a phasal head lower than C but higher than T, a head that is submitted to semantic interpretation as the (actual) complement to C. No additional evidence was offered for the existence of such a head with the properties attributed to it, and various problems are raised by this conjecture, briefly discussed in a footnote (p. 422, fn. 26).}
\begin{sloppypar}
I believe that a different approach to Fitzpatrick's discovery can resolve these issues in a new way. This approach will turn out to have implications for the proper treatment of other clauses with “missing” content, such as nonfinite clauses.
\end{sloppypar}
Let us accept as convincing the data (mostly) from Fitzpatrick with which we began this section, which seem to show that though the contents of the CP layer in the \textsc{aux}-drop construction are not subject to phonological interpretation, they \textit{are} submitted to semantic interpretation. The interpretation of \textsc{aux}-drop clauses as yes/no questions may thus rely on the semantic properties of interrogative C, and the availability of NPI licensing in such clauses will follow from whatever factors turn out to be relevant in non-\textsc{aux-}drop yes/no questions.
What should we then make of the correlation between the phonological absence of the tensed auxiliary verb and the factative effect, which limits tense interpretation to \textsc{present} with non-eventive predicates, and \textsc{past} with eventive predicates? I propose that this correlation does not reflect the \textit{grammar} of non-pronunciation and semantic interpretation at all, but reflects an entirely different consequence of non-pronunciation. When a speaker produces an utterance based on a derivation in which elements that normally receive a phonological interpretation are unpronounced, the language system of the hearer must \textit{reverse-engineer} the speaker's derivation, supplying its own guesses concerning the identity of the unpronounced elements. If the speaker produces an \textsc{aux-}drop question, for example, missing its tensed auxiliary verb, the language system of the hearer must rely on its own resources to supply the missing auxiliary verb and tense.
But what are those resources? I propose that they are limited, as a property of the human language faculty, and that it is the tightly limited capacity of a hearer for reverse-engineering the speaker's syntactic derivation, not the architecture of that derivation itself, that lies behind the factative effect in the English \textsc{aux}-drop construction.\largerpage
Let us begin by noting, with Fitzpatrick himself (p. 14), that \textsc{aux}-drop is impossible when the auxiliary is “semantically contentful”. The auxiliary verbs whose pronunciation can be suppressed (supportive \textit{do}, perfect \textit{have}, progressive and passive \textit{be}) are those that arguably lack semantics of their own, and are inserted to satisfy independent requirements of their morphosyntactic environment (cf. \citealt{GrOnn:2021aa}, Section 3 on perfect \textit{have}). By contrast, as Fitzpatrick points out, modals that do have semantics of their own cannot be omitted as part of the \textsc{aux-}drop construction:
\pex<NoModal>
\ptxt{No modal \Aux{}-drop}
\a<a> Anyone pick up John at the airport?\\
Impossible with the meaning `Can anyone pick up John at the airport?'
\a<b> Anyone play the piano at the party tomorrow?\\
Impossible with the meaning `Will anyone play the piano at the party tomorrow?'
\trailingcitation{(\citealt[412, ex. 25a--b]{fitzpatrick2006deletion})}
\xe
\begin{sloppypar}
\noindent I propose that this fact itself should be understood as a consequence of a general limitation on the reverse-engineering of phonologically suppressed material. This limitation dictates that the reverse-engineering process must be semantically \textit{unambitious}, positing as little semantics as possible that is not unambiguously reconstructable from the input provided by the speaker. I will call this limitation the \textit{principle of unambitious reverse engineering} (PURE). In essence, PURE is a version of the much-debated principle of “recoverability of deletion” (\citealt[41]{Chomsky1964c} and \citealt[79ff]{Katz:1964}), now viewed as an extra-grammatical property of the hearer's language system attempting to parse input from a speaker.
\end{sloppypar}
\needspace{4\baselineskip}
\pex<Recoverability>
\ptxt{Principle of unambitious reverse engineering (PURE)}
When determining the identity of unpronounced material in the course of reverse-engineering a speaker's syntactic derivation, the language system of the hearer considers only the \textit{minimally semantically contentful possibilities} compatible with the morphosyntactic environment.
\xe
I use the phrase “language system of the hearer” to emphasize that the discussion does not concern conscious decisions of the hearer, but rather the automatic behavior of the cognitive systems that parse linguistic input, determine its meaning, and communicate this information to other cognitive systems. In what follows, I will sometimes refer directly to the hearer's language system with the word “hearer” to keep the prose simple, but it is the hearer's language system that I intend throughout. I also assume that a speaker's language system self-monitors in the process of speech production, functioning as hearer as well as speaker, so that the planning of an utterance takes into account the restricted range of interpretations a hearer is permitted to entertain by the principle proposed below. So the term \textit{hearer} in this paper stands for an emphatically abstract concept.
Our intuitions that certain derivations do not permit \textsc{aux}-drop, on this view, are hearer-side intuitions concerning what derivations can and cannot be reverse-engineered (in response to a signal in which the root CP is unpronounced), not limitations on what the grammar can generate in the first place. The speaker's grammar thus overgenerates, and the effects of PURE have the logical status of a filter.\footnote{Carlo Geraci (personal communication) notes a similarity between these considerations and aspects of the “perceptual loop” theory of self-monitoring advanced by Levelt (\citeyear[96]{Levelt:1983aa}, \citeyear[chapter 12]{Levelt:1989aa}), as developed and debated in subsequent work. Levelt proposes a monitoring process that is “normally. opaque to the speaker, {[}which{]} should, rather, be regarded as based on the parsing of inner or overt speech”. “The great advantage of {[}such{]} a perceptual theory,” he continues, “is that controlling one’s own speech is like attending to somebody else’s talk. This makes it natural for the speaker to apply the same parsing procedures and sources of knowledge to his own speech as to other people’s speech” (\citealt[96--97]{Levelt:1983aa}). PURE and its consequences as discussed in this paper thus have obvious psycholinguistic implications, which I will not explore here, for lack of expertise, but hope may be clarified in future work.}
Let us consider how PURE interacts with examples like (\getref{NoModal}a--b), where the hearer must posit an auxiliary verb in C. Here a semantically contentless supportive \textit{do} is minimally semantically contentful and compatible with the morphosyntactic environment (since the highest audible verb is in the morphological bare form that cooccurs with auxiliary \textit{do}). As a consequence, PURE guides the hearer of an utterance like (\getfullref{NoModal.a}) or (\getfullref{NoModal.b}) to posit a suppressed form of \textit{do} and prevents the positing of a suppressed modal such as \textit{can} or \textit{will}. Likewise, because the morphosyntactic environment of an \textsc{aux}-drop shows T movement to C, the least semantically contentful possibility for reverse-engineering the contents of C features an interrogative complementizer. We might also attribute to PURE the fact that the hearer is not free to assume that the CP of an \textsc{aux-}drop construction contains any contentful \textit{wh-}phrase other than the yes/no operator, conceivably the least contentful \textit{wh}-form (as it invokes two fixed focus alternatives and is phonologically null in main clauses independent of \textsc{aux-}drop), but I will leave the details of this aspect of the puzzle for later work.\footnote{Carlo Geraci (personal communication) notes an unsolved problem for this approach: the fact that silencing of a \textit{wh}-phrase other than \textit{whether} is blocked even when the selectional properties of an obligatorily transitive verb might render this compatible with PURE. Thus *\textit{You wear?} is not acceptable, for example with the reading \textit{What did you wear?}, despite the transitivity of \textit{wear}. I leave this issue open.}
I return now to the factative effect in \textsc{aux-}drop, which I suggest is just another consequence of PURE. When it is necessary to reverse-engineer a derivation in which a tensed but unpronounced auxiliary verb has raised to C, PURE requires the hearer to posit a semantically minimal specification for the unpronounced T.\largerpage
But why should \textsc{past} qualify as a minimally contentful tense for an eventive predicate, while only \textsc{present} counts as minimally contentful for a non-eventive predicate? If \textsc{present} is a tense bound to the utterance time, then this relation may count as a ubiquitous component of the “morphosyntactic environment” of any utterance, licensing the hearer to posit \textsc{present} as the tense specification of a silenced T, in keeping with PURE. A \textsc{past} specification for T, however, would add \textit{anteriority} to the meaning of \textsc{present}, and thus qualify as less minimally semantically contentful. PURE might therefore prevent the hearer's parser from positing \textsc{past} with a non-eventive predicate, all things being equal. This derives the obligatorily \textsc{present} interpretation of an \textsc{aux-}drop clause with a non-eventive predicate.
Why then should an eventive predicate license the positing of \textsc{past} by the hearer as the tense of the speaker's derivation that is being reverse-engineered? Note that eventive predicates are incompatible with the simple \textsc{present}, unless coerced into a habitual or generic use (a fact that will be important in our discussion of infinitival clauses below):\largerpage[1]
\pex<eventivepresent>
\ptxt{Present tense incompatible with eventive predicates (unless coerced)}
\a<a> *Mary sings in the shower now. / *Alice reads a book now. / *Bob sells that car now.\\ ~~[unless habitual]
\a<b> Sue owns a car now. / John likes my cat now. / Bill knows German now. etc.
\xe
\noindent I propose that it is precisely because of the incompatibility of the English \textsc{present} with an eventive predicate that PURE permits the hearer to posit an underlying \textsc{past} in an \textsc{aux-}drop construction where the unpronounced auxiliary in C is \textit{do} and the main predicate is eventive. \textsc{past} is the least semantically contentful option compatible with the morphosyntactic environment. I will leave it as an open question whether this suggestion for English \textsc{aux}-drop illuminates the roots of the factative effect in other languages such as Haitian Kreyòl.{\interfootnotelinepenalty=10000\footnote{\citet{Dechaine:1995} offers a more detailed proposal concerning the tense interpretation of eventive predicates in these constructions, which I believe could be incorporated into the present discussion. Her account also correctly predicts the fact that Haitian Kreyòl favors a non-past generic interpretation for eventive predicates with a bare indefinite direct object, a fact also found in the \textsc{aux}-drop construction when the direct object is a mass singular or bare plural (an observation also made by Michelle Sheehan, personal communication):
\pexcnn<bareplurals>
\a
\begingl
\gla Pyè vann bèf. {\nogloss{\tabto{1.8in} \textit{eventive/indefinite object →present}}}//
\glb Pyè sell cattle//
\glft `Pyè sells cattle.'\trailingcitation{\citet[74, ex. 37a]{Dechaine:1995}}//
\endgl
\a
\begingl
\gla You sell cattle/cars? //
\glft `Do you sell cattle/cars?'//
\endgl
\xe
\noindent I am grateful to Athulya Aravind (personal communication) for bringing \citet{Dechaine:1995} to my attention.}}
We may now adopt Fitzpatrick's proposal that \textsc{aux}-drop arises from the more general possibility of leaving the highest layer of the root clause phonologically uninterpreted, without the contradictions that arose from extending this possibility to semantic interpretation as well. If the proposal advanced here is correct, there is no comparable optionality for semantic interpretation. The syntactic derivation is subject to semantic interpretation up to the root. The factative effect is a by-product of failing to phonologically interpret the CP layer of the main clause, just as Fitzpatrick proposed. But it is not a direct result of the grammatical derivation per se, but instead reflects the strictures imposed by PURE on the hearer forced to reverse-engineer the speaker's derivation. In the absence of evidence concerning the value of T that was included in the speaker's syntactic derivation, the hearer must assume a maximally unmarked value compatible with the morphosyntactic environment.\footnote{Our discussion leaves several important questions unanswered. We must ensure, for example, that a hearer's disambiguation of a syncretic form ambiguous between \textsc{past} and \textsc{present} such as \textit{put} or \textit{hit} is not subject to PURE. Ignorance concerning the precise identity of an item that has been phonologically interpreted (albeit confusingly) is evidently not the same problem for the hearer as determining the identity of an item that has avoided phonological expression entirely. Ellipsis is another, much larger elephant in the room of this analysis. There it is tempting to view the “surface anaphora” property of ellipsis (the need for a linguistic antecedent) as a sign of the strictures of PURE at work, but I leave the possible development of this idea for future work as well.}
\section{Exfoliation and the tense interpretation of infinitives}
\subsection{The derivational theory of infinitivization}
We are now in a position to take up the main topic of this paper: a second environment in which I have recently argued that tense and other material ends up unpronounced due to a property of the grammar that absolves this material from phonological interpretation, though for reasons quite different from those relevant to \textsc{aux}-drop. Here the juxtaposition of these arguments with the semantic findings reported by \citet{Wurmbrand:2014aa} raises questions similar to the contradictions that I have attempted to resolve concerning Fitzpatrick's theory of English \textsc{aux}-drop. Once again, I will suggest that a reverse-engineering reinterpretation of Wurmbrand's discoveries resolves these contradictions. Rather than reflecting semantic consequences of the speaker's syntactic derivation, as Wurmbrand proposes, I will suggest that they actually reflect the restrictions placed by PURE on the hearer's ability to reverse-engineer that derivation.
In work reported in \citet{Pesetsky:2019aa}, I have argued for a \textit{derivational theory of infinitival complementation}. On this view, all embedded clauses are generated by the syntax as full and finite CPs. Infinitival clauses are the result of a rule of \textit{exfoliation}, which strips away the outer layers of a finite CP, leaving behind an infinitival clause, under very specific circumstances: namely, when a probe external to CP finds a goal internal to that CP that does not occupy its edge. Exfoliation eliminates as many clausal layers as is necessary to place that goal at the edge of what remains, so it can interact with that goal (see \figref{fig:pesetsky:1}).\footnote{Crucial to the proposal as an account of English infinitives is the existence of a \textit{to} projection lower than T, and a principle (argued to have more general applicability) that leaves \textit{to} unpronounced when exfoliation does not strip away the projections higher than \textit{to}. See \citet{Pesetsky:2019aa} for discussion and argumentation.}
\begin{figure}
\caption{Exfoliation\label{fig:pesetsky:1}}
\begin{forest}for tree={s sep=15mm, inner sep=0, l=0}
[ [V\\\textit{\textbf{φ-probe}},name=probe]
[CP,name=cp
[\ldots,name=speccp] [C$'$,name=cbar [C,name=comp] [TP,name=tp
[T,name=tense,tikz={\node [rectangle,draw=black, fill=black, fill opacity=0.05,text opacity=1,anchor=right,fit=()(!u)(!uuu)(speccp)] {};}]
[\textit{to}P,name=exfoliate [\textit{\textbf{subject}},name=lowsubject] [\textit{to}$'$ [\textit{to}] [\textit{vP}]]]]]
{\draw (.east) node[right=1cm, align=left] {\small{← \textit{\begin{minipage}{\widthof{Exfoliation removes this portion}}Exfoliation removes this portion of the embedded clause\end{minipage}}}};}
]]
\draw[overlay, -{Triangle[]}, dashed] (probe) to[out=south,in=west] (lowsubject);
\end{forest}
\end{figure}
Arguments from several directions have been advanced to support this proposal.\footnote{In a Festschrift honoring Susi Wurmbrand, it is especially important to note that this proposal does not necessarily include Restructuring infinitives in its purview. Restructuring clauses of the German type studied by Wurmbrand are not in any obvious sense a response to cross-clausal probing, and might represent small constituents generated as such. I leave the integration of exfoliation with the phenomenon of Restructuring for future work.}
{\def\lingexbreakpenalty{10}
First, I argued that \textit{paradigms of acceptability for infinitival complementation} do indeed correlate with probe-goal relations across the clause boundary. Whenever a probe capable of triggering Raising successfully contacts an element in the specifier of \textit{to}P across a CP boundary, that CP is reduced to an infinitive, but not otherwise. The presence of a Raising probe on the higher V in (\getref{nominalsubjects}a), and on the higher \textit{v} in (\getref{nominalsubjects}b) accounts for the infinitivization of the embedded clause, while the absence of any comparable probe in (\getref{nominalsubjects}c--f) accounts for the impossibility of infinitivization in these examples.
\pex<nominalsubjects>
\NumTabs{8}
\ptxt{Nominal subjects of an infinitival clause}
\a Sue considers Mary to have solved the problem. \\\hbox{}\hfill\hbox{\emph{Raising to Object (spec,VP)}}
\a Mary seems to speak French well.\hbox{}\hfill\hbox{\emph{Raising to Subject (spec,\textit{v}P)}}
\a<seemsMary> \ljudge*It seems Mary to have solved the problem. \hfill\emph{unaccusative V}
\a \ljudge*It was believed Mary to speak French well.\hfill\emph{passive V}
\a \ljudge*Mary is aware Bill to be the best candidate.\hfill\emph{A}
\a<beliefit> \ljudge*Mary's belief it to have been raining \hfill\emph{N}
\xe
\noindent The standard competitor to this proposal is the traditional claim that infinitives are not derived from finite clauses but are generated nonfinite, with Case Theory accounting for contrasts like those in (\getref{nominalsubjects}), on the assumption that the subject of a nonfinite clause can only pass the case filter if some external element such as the higher verb in (\getref{nominalsubjects}a) or the higher T in (\getref{nominalsubjects}b) case-licenses it. The fact that non-nominals that otherwise do not need to pass the case filter, such as CP subjects and fronted adjectival predicates, show exactly the same paradigm, however, argues against this standard competitor:\largerpage
\pex<CPsubjects> \ptxt{Clausal subjects of an infinitival clause}
\a Sue considers [that the world is round] to be a tragedy.
\a {[That the world is round]} seems to be a tragedy.
\a \ljudge*It seems [that the world is round] to be a tragedy.
\a \ljudge*It was believed [that the world is round] to be a tragedy.
\a \ljudge*Mary is aware [that the world is round] to be a tragedy.
\a \ljudge*Mary's belief [that the world is round] to be a tragedy.
\xe
\pex[aboveexskip=0pt]<predicatefronting> \ptxt{Predicate fronting in an infinitival clause}
\a<considerPred> Sue considers [even more important than linguistics] to be the fate of the planet.
\a {[Even more important than linguistics]} seems to be the fate of the planet.
\a \ljudge*It seems [even more important than linguistics] to be the fate of the planet.
\a \ljudge*It was believed [even more important than linguistics] to be the fate of the planet.
\a \ljudge*Mary is aware [even more important than linguistics] to be the fate of the planet.
\a \ljudge*Mary's belief [even more important than linguistics] to be the fate of the planet.
\xe
\noindent Other more complex arguments reinforce the claim that the distribution of infinitival complements reflects conditions on exfoliation rather than factors such as subject case licensing traditionally claimed to be at work in these paradigms. The reader is referred to \citet{Pesetsky:2019aa} for these arguments, as well as for answers to certain obvious questions raised by this proposal that I will not attempt to answer here, such as the analysis of English infinitives introduced by \textit{for}. To keep the discussion simple, let us also imagine that Control infinitives, like their counterparts created by Raising, also involve a probe-goal interaction between the embedded subject occupying the specifier of \textit{to}P and some element in the higher clause, as in the movement theory of control (Bowers \citeyear[675 ff.]{Bowers1973}, \citeyear{bowers}, Wehrli \citeyear[115--131]{Wehrli1980}, \citeyear{Wehrli1981}, \citealt{Hornstein1999b}), though \citet{Pesetsky:2019aa} presents an alternative possibility that I will not address here.}
A second type of argument advanced for this proposal is the fact that it generalizes to configurations in which a probe finds a goal occupying a position higher than the specifier of \textit{to}P. When this happens, the embedded clause is once again reduced by exfoliation, but now to something larger than an infinitive. This provides an account of the well-known \textit{complementizer-trace effect} (Perlmutter \citeyear{Perlmutter1968,Perlmutter:1971}; see \citealt{pesetsky2015complementizer} for a survey of subsequent discussion), in which an otherwise possible overt complementizer is obligatorily absent when a subject or subject-like phrase is extracted, leaving behind a clause that lacks its complementizer but remains finite:\largerpage
\noindent \pex<Complementizer-trace-Effect>
\ptxt{Complementizer-trace effect}
\a<ObjectExtraction> Who do you think (that) Sue met \gap.
\a<SubjectExtraction> Who do you think (*that) \gap met Sue.
\smallskip
\a Exactly how much more important than linguistics did she say (that) the fate of the planet was~\gap?
\a Exactly how much more important than linguistics did she say (*that) \gap was the fate of the planet?
\xe
\noindent If the overall proposal is correct, the explanation for complementizer-trace effects falls together with an explanation for why nonfinite clauses should exist in the first place, uniting two phenomena previously viewed as quite distinct.
Finally and most significantly in the present context, \citet{Pesetsky:2019aa} presents \textit{derivational opacity} arguments for the proposal that infinitival clauses are born full and finite, and become infinitives during the course of the syntactic derivation. The core of one such argument can already be seen in the predicate fronting examples (\getref{predicatefronting}a--b). A traditional account of infinitival clauses that attributes to the case filter the unacceptability of the starred examples in (\getref{nominalsubjects}) not only struggles to explain the parallel effect in (\getref{CPsubjects}) and (\getref{predicatefronting}) but also struggles to explain how the postverbal nominal passes the case filter at all, since there is neither an accusative-assigning verb in its immediate vicinity, nor an available instance of finite T. The much-discussed Icelandic phenomenon exemplified by (\getref{DATNOMR2}) below presents the same problem in a stronger form. Here an infinitival from which Raising has taken place has a quirky case-marked subject. Not only is the postverbal object of the embedded clause acceptable, but it bears \Nom{} case morphology, with no visible instance of finite T that could have entered an agreement relation with it that results in \textsc{nom} case:
\pex<DATNOMR2>
\begingl
\glpreamble Quirky subject + \Nom{} object in an infinitival complement (Icelandic)//
\gla Læknirinn\ix{\textnormal{i}} telur barninu (í barnaskap sínum\ix{\textnormal{i}}) hafa batnað veikin.//
\glb the.doctor.\Nom{} believes the.child.\Dat{} (in foolishness his) have.\Inf{} recovered.from the.disease.\textbf{\Nom{}}//
\glft `The doctor\ix{\textnormal{i}} believes the child (in his\ix{\textnormal{i}} foolishness) to have recovered from the disease.'\trailingcitation{(\citealt[242]{Yip1987a}, adapted)} //
\endgl
\xe
\noindent This is of course one of the phenomena that inspired \citet{Yip1987a} and \citet{Marantz1991} to abandon the proposal that \Nom{} depends on agreement with finite T in the first place (and with it, the proposal that nominals need to be licensed at all), in favor of a theory in which \Nom{} morphology is a default assigned to a nominal in an appropriate position when other case rules fail to apply.
On an exfoliation account, however, licensing and \Nom{} morphology in examples like (\getref{DATNOMR2}) pose no problem for theories that posit a connection between \textsc{nom} case assignment and finite T.\footnote{One might reject this connection for other reasons, of course, but it does appear to be cross-linguistically robust in environments without the special characteristics of (\getref{predicatefronting}a--b) and (\getref{DATNOMR2}) and others for which an exfoliation derivation might be plausible.} Since the embedded infinitival clause started its life full and finite, the postverbal nominal could enter an agreement relation with finite T within the embedded clause, just as it would in a clause that remained finite throughout the derivation. The interaction between the quirky subject and a Raising probe in the higher clause triggers exfoliation, which left the embedded clause infinitival, but this operation came later than the licensing of the postverbal nominal and the assignment of \textsc{nom} case to it. On this view, the presence of \Nom{} morphology on the postverbal subject is a relic of its earlier life as a nominal in an agreement relation with finite T, an instance of derivational opacity, since the T that played the crucial role in these events has been eliminated in the course of the derivation. \citet{Pesetsky:2019aa} provides independent evidence for this proposal from the observation that the anaphor-agreement effect blocks a reflexive as the postverbal object in these constructions, despite the absence of any visible agreement.
The derivational approach to the existence of nonfinite clauses faces an important problem, however, concerning semantic interpretation. All things being equal, we might expect to find straightforward derivational opacity arguments in this domain as well. Just as \Nom{} morphology is preserved on the postverbal subject in (\getref{DATNOMR2}) even after the T with which it agreed and from which it (arguably) received \Nom{} has been eliminated, so we might expect the various tenses and modals available to finite clauses to continue to show semantic signs of their former presence. In fact, however, tense interpretation in infinitival clauses is severely restricted, in a manner illuminated and clarified by \citet{Wurmbrand:2014aa}. Why do infinitival clauses not show the full range of semantic possibilities available to finite clauses? If they did, it would furnish a semantic derivational opacity argument analogous to the morphosyntactic arguments that support the exfoliation theory of infinitivization.
One response might be to reject the derivational view of infinitivization (in favor of a more standard approach according to which nonfinite clauses are generated as such, and problems like those raised above are solved in some other way). Another response might propose that some aspects of semantic interpretation apply late in the derivation, after exfoliation has taken place. This is a logical possibility mentioned in \citealt{Pesetsky:2019aa}, but entails that semantic interpretation does not apply entirely cyclically during the course of the syntactic derivation, contradicting results such as those reported by \citet[66--73]{Fox1999} and others that argue that semantic interpretation is strongly cyclic, fully interspersed with the syntactic derivation.
A variant of this second response might acknowledge that semantic interpretation is interspersed with the syntactic derivation, but permit the semantics of a constituent targeted by exfoliation to be \textit{revised}, deleting or altering those components of meaning that owed their existence to material deleted by the exfoliation operation.\footnote{I am grateful to Carlo Geraci and Michelle Sheehan for helping me clarify the reasoning in this paragraph.} Phonological interpretation might work this way as well, if it too is fully interspersed with the syntactic derivation. If, for example, a fully built CP undergoes phonological interpretation, only to lose its outer layers to exfoliation later in the derivation, we must entertain a theory according to which cyclic phonological interpretation is subject to later revision, and it would not be surprising to learn that semantic interpretation follows a similar pattern. As I noted in the introduction to this paper, \citet{Wurmbrand:2014aa} argues that English nonfinite clauses are deeply \textit{tenseless}, a proposal that might seem to fit this variant response quite neatly. Semantic tenselessness is a natural outcome if the elimination of TP by exfoliation triggers elimination of the semantics that TP introduced.\footnote{Michelle Sheehan (personal communication) makes the interesting observation that under Chomsky's \citeyearpar[13, ex. 9]{Chomsky2001} proposal concerning the timing of phonological and semantic interpretation, one might be able to adopt this variant without any notion of revision. According to this proposal, a phase is spelled out (its contents transferred to PF and to LF) and rendered impermeable to processes such as movement only when the \textit{next} phase head is merged. On this view, a clausal complement to V will not be subject to spell-out and rendered impermeable until the higher \textit{v}P has been completed. By this time, exfoliation of that clausal complement will already have taken place, since the relevant triggers are all contained in that\textit{ v}P phase. The entire raison d'être of exfoliation as developed in \citet{Pesetsky:2019aa}, however, rests on the impermeability of non-edge positions within the embedded clause to movement across the clause boundary. Exfoliation takes place precisely so as to leave the goal for the \textit{v}P-internal probe at the edge of what remains of the embedded clause, rendering it accessible for movement triggered by that probe. Though one can imagine reformulations that might render versions of the two proposals compatible, they are clearly at odds with respect to the status of the pre-exfoliation embedded clause.}
Nonetheless, I will argue for an entirely different solution to this puzzle here. I will suggest that semantic interpretation is \textit{not} revised in the wake of exfoliation, and thus that the interpretation of nonfinite clauses is always an interpretation inherited from the derivational period when the clause was full and finite. On this view, the semantic effects charted by Wurmbrand are not indications of tenselessness, and in fact, are not restrictions on the semantics of infinitival complements at all. They are actually PURE effects: limitations on a hearer's ability to ascribe semantic properties to phonologically suppressed material, when reverse-engineering the derivation behind a speaker's utterance. I believe this alternative is more attractive because the semantics of nonfinite clauses (in English at least) do not actually point in the direction of tenselessness. The mapping between the semantic possibilities available to nonfinite and finite clauses is indeed complex (as we shall see). Nonetheless, the set of temporal and modal interpretations available to nonfinite clauses appears to be a \textit{proper subset} of the set of interpretations available to tensed finite clauses, its tense (and modal) semantics always corresponding to that of some type of tensed clause, with no sui generis possibilities that might indicate total tenselessness. I take this to be an important observation that may favor the approach developed below over the approach developed by \citet{Wurmbrand:2014aa}.
On the speaker's side of the story, I therefore suggest that in principle, any tense or modal in T may be eliminated by exfoliation in the process of generating an infinitival clause. Crucially, the semantics provided by this tense or modal remains intact and unrevised through the end of the derivation. It is the \textit{hearer}'s side of the story that imposes the restrictions documented by Wurmbrand and discussed below. Though in theory any tense or modal can be exfoliated away in the course of the speaker's derivation, in practice a hearer can posit only those tenses and modals to the embedded clause that are semantically \textit{minimal} and compatible with their environment, in the cases at hand, compatible with the selectional properties of the higher predicate and the ubiquitous availability of the utterance time as an anchor for tense. This is the source of our sense that infinitival clauses are inherently restricted in the tense and modal specifications that they can express. Not every meaning producible by a speaker's derivation can be reverse-engineered and attributed to it by the hearer.
\begin{sloppypar}
Though the proposal advocated here is essentially the opposite of Wurmbrand's (interpretation as tensed vs. deep tenselessness), my presentation will be entirely derivative of the findings reported in \citet{Wurmbrand:2014aa}, including her classification of the phenomena she discovered. Following Wurmbrand, I first consider future infinitives (complements to verbs like \textit{want} and \textit{decide}) and then propositional infinitives (complements to verbs like \textit{claim} and \textit{believe}), followed by a brief discussion of infinitival clauses understood as simultaneous in tense with the clause in which they are embedded (complements to verbs like \textit{manage} and \textit{seem}). We are able to reach such different conclusions from the same set of findings because we pursue different strategies of argumentation concerning these findings. These can be summarized as follows:\largerpage
\end{sloppypar}
\pex
\ptxt{Strategies of argumentation}
\a \citet{Wurmbrand:2014aa}: The behavior of future, propositional, and simultaneous infinitives cannot be exclusively identified with any single value that tense may bear in a corresponding finite clause. These complements do display behavior consistent with tenselessness. Therefore they are deeply tenseless.
\a This paper: The behavior of future, propositional, and simultaneous infinitives may be identified with the \textit{union of behaviors} expected from all the semantically minimal values for tense that a hearer can posit when unambitiously reverse-engineering the pre-exfoliation portion of the speaker's derivation (as required by PURE). Therefore they are not deeply tenseless.
\xe
Crucially, if the alternative advocated in this paper is correct, we do have a derivational opacity argument for tense semantics after all, since the tense interpretation of an infinitive does reflect the pre-exfoliation tense properties of a T that is later deleted, a fact obscured by the severe restrictions imposed on the hearer by PURE. This will leave us with one apparent discrepancy between the outcome of PURE for \textsc{aux}-drop and its outcome for infinitivization, but this discrepancy follows from the difference between (1) non-pronunciation of syntactically present structure (\textsc{aux}-drop, following Fitzpatrick), and (2) actual deletion of syntactic structure by exfoliation.
\subsection{PURE and future infinitives}
Following Wurmbrand, I consider first the class of infinitival complements with future (or irrealis) semantics, like the Raising (ECM) complement (\getfullref{futureinf.a}) and the Control complement in (\getfullref{futureinf.b}):
\pex<futureinf>
\ptxt{Future infinitives}
\a<a> Yesterday, Mary wanted/needed John to leave tomorrow.
\a<b> Yesterday, Mary decided/wanted/planned to leave tomorrow.
\trailingcitation{(\citealt[408, adapted from ex. 6]{Wurmbrand:2014aa})}
\xe
\noindent Future infinitives have often been described as “tensed” in the literature since Stowell (\citeyear{Stowell:1981}, 40ff.; \citeyear{Stowell1982b}). Such theories entail that these infinitives contain in some fashion a silent variant of English \textit{will} or \textit{would}. Wurmbrand sought to dispel this idea, by demonstrating that the properties of future infinitives are not identical to those of either English \textit{will} or \textit{would}, which she analyzes (following Abusch \citeyear{Abusch:1985tm}, \citeyear{Abusch1988}) as bimorphemic auxiliary verbs consisting of an abstract modal \textit{woll} plus \textsc{present} tense (\textit{will}) or \textsc{past} tense (\textit{would}). She argues at length that the properties of future infinitives favor a theory according to which such infinitives are \textit{deeply tenseless}. Specifically, they contain \textit{woll} but no specification for \textsc{past} or \textsc{present} whatsoever. If her conclusions are correct, future infinitives present the exact opposite of a derivational opacity argument for the syntactic derivation of nonfinite clauses by exfoliation. They present a derivational conundrum for an exfoliation theory. If a future infinitive was indeed tensed in its derivational youth, as the exfoliation proposal claims, the theory must somehow ensure that no residue of its tensed beginnings survives in the semantics of its final infinitival form. Below, I survey these arguments and suggest an alternative.
Wurmbrand first contrasts the behavior of future infinitives with the behavior of present-tense \textit{will}. \textit{Will} places a situation in the absolute future with respect to the utterance time, while a future infinitive may pick out a time that merely follows the time of the higher clause:
\pex<absolutefuture>
\ptxt{Future infinitive → relative future vs. \textit{will} → absolute future}
\a<a> Leo decided a week ago [that he will go to the party (*yesterday)].
\a<b> Leo decided a week ago [to go to the party yesterday].
\trailingcitation{(\citealt[414, ex. 22]{Wurmbrand:2014aa})}
\xe
\noindent Sequence of tense (SOT) effects also reveal ways in which future infinitives do not behave as though they contain \textit{will}. Following \citet{Ogihara1996}, Wurmbrand assumes that sequence of tense effects are the result of a rule that deletes a tense at LF, if it is in the immediate scope of another tense with the same value, and binds the situation time of the lower clause to that of the higher clause. For this reason, as she notes, the embedded clause in \textit{We found out that Mary was happy} does not require the time of the embedded clause to precede the time of finding out, but permits the time of the embedded clause to overlap that time, as a consequence of the higher occurrence of \textsc{past} deleting the embedded occurrence.
As she also notes, citing Ogihara, the sequence of tense rule applies in the same way to \textsc{present} in a sentence like \textit{John will see the unicorn that is walking}, yielding a possible interpretation under which the unicorn's walking takes place at the seeing time, not utterance time. Crucially, it is the \textsc{present} component of \textit{will} that triggers the deletion at LF of embedded \textsc{present} (resulting in the binding of the lower situation time by the higher).
Wurmbrand now considers the three-clause structure in (\getfullref{SOTwill.a}) in which \textsc{past} in the highest clause is separated from \textsc{past} in the lowest clause by an intervening clause containing \textit{will}, which as we have seen contains \textsc{present}. As predicted, \textsc{past} in the lowest clause cannot be deleted, since the intermediate clause contains \textsc{present}, and the closest higher instance of \textsc{past} is in the highest clause. Crucially, however, replacing \textit{will} in the middle clause with a future infinitive in (\getfullref{SOTwill.b}) yields a different result, the possibility of an SOT interpretation of the embedded clause, which Wurmbrand interprets as directly triggered by \textsc{past} in the highest clause. \textsc{past} in the highest clause can trigger SOT deletion of \textsc{past} in the lowest clause, Wurmbrand suggests, because the intermediate clause is truly tenseless, and in particular does not contain a null counterpart to the \textsc{present}-tense \textit{will} in (\getfullref{SOTwill.a}).
\pex<SOTwill>
\ptxt{\textit{Will} blocks SOT deletion of \textsc{past}, but future infinitive does not}
\a<a>
{[}\textsubscript{\textsc{past}} John promised me yesterday {[}\textsubscript{\textit{will}} that he \dotuline{\textnormal{will}} tell his mother tomorrow {[}\textsubscript{\textsc{past}} that they were having their last meal together{]]]}.\smallbreak
*\textit{telling time = meal time}
\a<b> {[}\textsubscript{\textsc{past}} John promised me yesterday {[}\textsubscript{\textsc{fut infin}} \dotuline{to} tell his mother tomorrow {[}\textsubscript{\textsc{past}} that they were having their last meal together{]]]}.\smallbreak
✓\textit{telling time = meal time}
\trailingcitation{(\citealt[415, ex. 24, 25a]{Wurmbrand:2014aa}, building on \citealt{Abusch1988})}
\xe
Wurmbrand next contrasts the behavior of future infinitives with the behavior of past-tense \textit{would}. As she notes, an idiosyncrasy of \textit{would} is the fact that (except in the consequent of a conditional) it is permitted only in an SOT environment where its \textsc{past} feature can be deleted by \textsc{past} in the immediately containing clause. It is therefore blocked in a main clause (except as the consequent of a conditional missing its antecedent, e.g. *\textit{Yesterday, I would be king}), and blocked in an embedded clause if the immediately containing clause is not \textit{\textsc{past}}, as illustrated in (\getfullref{wouldrestriction.a}), where the higher clause contains \textsc{present}-tense \textit{will}. Crucially, a future infinitive is possible in the same environment where \textit{would} is blocked, as (\getfullref{wouldrestriction.b}) shows:
\pex<wouldrestriction>
\ptxt{\textit{Would} is excluded in non-\textsc{past} SOT environment, but future infinitive is not}
\a<a> \ljudge{*}{[}\textsubscript{\textnormal{will}} John will promise me tonight {[}\textsubscript{\textit{would}} that he \dotuline{\textnormal{would}} tell his mother tomorrow \ldots {]]}
\a<b> {[}\textsubscript{\textit{will}} John will promise me tonight {[}\textsubscript{\textsc{fut infin}} \dotuline{to} tell his mother tomorrow {[}\textsubscript{\textsc{past}} that they were having their last meal together{]]]}.
\trailingcitation{(\citealt[415, ex. 29a, 30a]{Wurmbrand:2014aa})}
\xe
\noindent Furthermore, as Wurmbrand also notes, the most embedded clause in (\getfullref{wouldrestriction.b}) lacks any SOT reading that could permit the meal-eating time to be identical with the telling time, as we would expect if the future infinitive could be understood as a silent version of \textsc{past}-tense \textit{would} (perhaps immune for some reason to the restriction to \textsc{past} SOT environments). It is therefore clear that the future infinitive cannot be uniformly identified as a silent version of \textit{would} any more than it can be uniformly identified as a silent version of \textit{will}. Once again, Wurmbrand concludes that future infinitives are simply tenseless, containing an untensed \textit{woll}.
In fact, however, another interpretation of these findings is possible, mentioned by Wurmbrand herself, who attributes the observation to “David Pesetsky and a reviewer” (p. 440).\footnote{I have no memory of making this observation.} Although the future infinitive does not behave \textit{uniformly} like either \textit{will} or like \textit{would}, wherever it fails to behave like \textit{will} it behaves like \textit{would}, and wherever it fails to behave like \textit{would}, it behaves like \textit{will}.
Consider first the availability of SOT deletion of \textsc{past} in the lowest clauses of (\getref{SOTwill}a--b), impossible if the middle clause contains \textit{will}, but possible if the middle clause contains a future infinitive. Wurmbrand took these data to show that the middle clause is untensed, but they could equally well show that the middle clause contains a silenced \textsc{past}-tense \textit{would}:
\pex<SOTwould>
\ptxt{Substituting \textit{would} for \textit{will} in (\getfullref{SOTwill.a}) permits the missing reading}
{[}\textsubscript{\textsc{past}} John promised me yesterday {[}\textsubscript{\textit{would}} that he \dotuline{would} tell his mother tomorrow {[}\textsubscript{\textsc{past}} that they were having their last meal together{]]]}. \smallbreak
✓\textit{telling time = meal time}
\xe
On this view, it is \textsc{past} in the middle clause, not \textsc{past} in the highest clause, that deletes \textsc{past} in the lowest clause, yielding the SOT interpretation under which the telling time and the meal-eating time are identical. Note that the \textsc{past} feature of this silenced \textit{would} will itself be deleted under the influence of \textsc{past} in the highest clause, but that is exactly what overt \textit{would} requires. Assuming that SOT applies cyclically, we have an instance of LF derivational opacity, since the tense responsible for deleting \textsc{past} in the lowest clause is not present in the final LF representation.
Now consider the availability of the future infinitive in (\getfullref{wouldrestriction.b}) in an environment where \textit{would} is blocked. Once again, though this possibility is compatible with Wurmbrand's view that the middle clause is untensed, it could equally well show that here the future infinitive contains a silenced \textsc{present-}tense \textit{will}, which is not blocked in this environment. And indeed, (\getfullref{wouldrestriction.b}) can be paraphrased with overt \textit{will} in the middle clause:\largerpage[1.75]
\pex[belowexskip=0pt,aboveexskip=.5\baselineskip]<SOTwould>
\ptxt{Substituting \textit{will} for \textit{would} in (\getfullref{wouldrestriction.a}) eliminates the star}
{[}\textsubscript{\textit{will}} John will promise me tonight {[}\textsubscript{\textit{will}} that he \dotuline{will} tell his mother \\tomorrow \ldots{]]}
\xe
The view that a future infinitive may be understood as containing either a silenced \textit{will} or a silenced \textit{would} is exactly what we expect under the exfoliation hypothesis for nonfinite clauses, according to which they are generated by Merge as full and finite CPs, with exfoliation responsible for stripping them of their CP and TP layers in the course of the derivation. On this view, all things being equal, the source of a future infinitive must be a finite clause with future content, but that content may in principle be either \textit{will} or \textit{would}. From this vantage point, the discovery that both possibilities are in fact instantiated comes as no surprise. Example (\getfullref{SOTwill.b}) is acceptable on an SOT reading because there is a derivation in which its middle clause was generated with \textit{would}, while (\getfullref{wouldrestriction.b}) is acceptable because there is a derivation in which its middle clause was generated with \textit{will}.
Now note that because \textit{would} (except as the consequent of a conditional) is idiosyncratically restricted to SOT environments, the two kinds of future modals that may be generated in a complement clause bear either \textsc{present} tense at LF (\textit{will}) or no tense whatsoever at LF, due to the tense-deleting action of SOT (\textit{would}). If these modals disappear in the course of the derivation as a consequence of exfoliation, yielding an infinitive, the hearer of such a clause faces a reverse engineering task not unlike that posed by an English \textsc{aux}-drop clause. In particular, the hearer's parser must assign content to the finite T of the derivational ancestor of the infinitival clause. If PURE is correct as stated in (\getref{Recoverability}), the hearer's options are tightly restricted, limited to “least semantically contentful possibilities compatible with the morphosyntactic environment”.
Are the distribution and range of possible interpretations for future infinitives compatible with PURE? If \textit{semantic selection} and \textit{binding} count as elements of the morphosyntactic environment relevant to PURE, the answer is yes. Assuming with Wurmbrand that \textit{will} and \textit{would} are the \textsc{present} and \textsc{past} tense forms, respectively, of an abstract morpheme \textit{woll}, we need to ask (1) whether PURE permits the positing of \textit{woll} in infinitival complement clauses where no form of the modal is visible, and (2) whether PURE permits positing both \textsc{present} and \textsc{past} in free alternation as the tense of this modal. I believe the answer is plausibly yes.
If selection is a component of the morphosyntactic environment relevant to PURE, then the positing of an “ancestral” \textit{woll} in the complement to a verb like \textit{promise} can be justified by the semantic selectional properties of \textit{promise} and any other predicate that describes an attitude towards a future situation. \textit{Woll} adds no semantics to what is required by the morphosyntactic environment, and therefore should count as “minimal” in the sense relevant to PURE.
What about \textsc{present}, the non-modal component of \textit{will}? Building on the proposals advanced in Section \ref{subsec:Reverse-engineering-an-aux-drop}, if \textsc{present} is a tense bound to the utterance time, this relation alone should license positing \textsc{present} as the tense specification of T in a future infinitive, without violating PURE.
Finally, what about \textsc{past}, the non-modal component of \textit{would}? Continuing to build on the proposals advanced in Section \ref{subsec:Reverse-engineering-an-aux-drop}, a \textsc{past} specification for T that survived until LF should count as non-minimal, since it adds\textit{ anteriority} to the meaning of \textsc{present}. PURE should therefore prevent the hearer from positing ancestral \textsc{past} as part of the derivation of a future infinitive, all things being equal, with one important qualification. If an instance of \textsc{past} makes no semantic contribution at all because it is deleted by the SOT rule, positing such an instance of \textsc{past} will be perfectly compatible with the strictures of PURE. As Wurmbrand noted and as discussed above, \textit{would} is in fact restricted to SOT environments. It thus follows that the hearer's parser should be free to posit ancestral \textit{would} as an auxiliary verb of a future infinitive, just as suggested above.
Summarizing the crucial properties of the speaker and the unambitious
re\-verse-engineering hearer in this domain:
\pex
\ptxt{Speaker and hearer summary: Future infinitives}
\textit{Speaker}: Free to posit any content whatsoever for T of the embedded clause\\
\textit{Hearer (restricted by \textnormal{PURE})}:
\a Hearer posits \textit{woll} because it is selected by the higher verb. No other modal is possible.
\a Hearer may posit \textsc{present} as the pre-exfoliation tense of the future modal because it is semantically minimal (as we saw in discussing \textsc{aux}-drop), yielding \textit{will}.
\a Hearer may posit \textsc{past} as the pre-exfoliation tense of the future modal so long as it is semantically inert due to SOT (as is always the case with \textit{would}).
\xe
\subsection{PURE and propositional infinitives}\largerpage
I turn now to non-future infinitival clauses with propositional semantics, such as Raising/ECM complements to verbs like \textit{believe} (e.g. \textit{She believes Mary to be the winner}) and control complements to verbs like \textit{claim} (\textit{She claimed to be the winner}). As Wurmbrand notes (cf. \citealt{Pesetsky1991}), these complements have aspectual properties strongly reminiscent of the English \textsc{present}, resisting eventive interpretation of simple VPs, as briefly discussed in Section \ref{subsec:Reverse-engineering-an-aux-drop} above:\footnote{Wurmbrand uses the term “episodic”, where I use “eventive” for consistency with other discussion. If there are crucial differences between these notions that might compromise the overall argument, I leave that issue for future research.}
\pex<eventive>
\ptxt{Eventive interpretation: propositional infinitives that pattern with English \textsc{present} tense}
\a<d> Bill knows German well. \tabto{3in}\textit{✓non-eventive}
\a<e> They believe Bill to know German well.
\a<f> They claim to know German well.
\vspace{.5\baselineskip}
\a[label=d]<a> \ljudge*Mary sings in the shower right now.\tabto{3in}\textit{*eventive}
\a[label=e]<b> \ljudge*They believe Mary to sing in the shower right now.
\a[label=f]<c> \ljudge*They claim to sing in the shower right now.
\trailingcitation{(\citealt[431, adapted from ex. 55--56]{Wurmbrand:2014aa})}
\xe
\noindent The English \textsc{past} does license eventive interpretation, but infinitival complements to verbs like \textit{believe} and \textit{claim} cannot be understood as bearing \textsc{past} tense semantics (without the addition of \textsc{have}-\textit{en}, discussed below), regardless of eventivity:
\pex<eventive2>
\ptxt{Propositional infinitives that may not be understood as \textsc{past} tense}
\a They knew German well when they were young.
\a \ljudge*They believe(d) Bill to know German when they were young.
\a \ljudge*They claim(ed) [to know German well when they were young].
\vspace{.5\baselineskip}
\a Mary sang in the shower yesterday at 8:00.
\a<b> \ljudge*They believe(d) Mary to sing in the shower yesterday at 8:00.
\a<c> \ljudge*They claim(ed) to sing in the shower yesterday at 8:00.
\xe
\begin{sloppypar}
Let us first consider these observations from an exfoliation perspective. If infinitival clauses like those in (\getref{eventive}\getref{eventive.e}--\getref{eventive.f}) are derived from full finite clauses, once again the hearer of such a complement must reverse-engineer the speaker's derivation, and posit a tense value for T in that clause. If PURE permits the hearer to posit ancestral \textsc{present} but not \textsc{past}, for the reasons just discussed, the contrasts in (\getref{eventive}) and (\getref{eventive2}) are immediately predicted. If the hearer posits ancestral \textsc{present}, it is no surprise that eventive interpretation is restricted just as it is in \textsc{present} tense clauses that have not been reduced to infinitives by exfoliation. Positing \textsc{past} is ruled out by PURE, since \textsc{past} is not semantically minimal as \textsc{present} is.
\end{sloppypar}
Wurmbrand, however, presents an SOT environment in which infinitival complements like these behave differently from \textsc{present} tense finite clauses. The argument once again involves SOT in a three-clause structure in which the infinitival clause is the middle clause:
\pex<pregnancy1>
\ptxt{Propositional infinitives that appear not to block SOT}
\a {[}\textsubscript{\textsc{past}} A year ago, they believed Mary {[}\textsubscript{\textsc{prop infin}} to know {[}\textsubscript{\textsc{past}} that she was pregnant{]]]}.
\a {[}\textsubscript{\textsc{past}} A year ago, Mary claimed {[}\textsubscript{\textsc{prop infin}} to know {[}\textsubscript{\textsc{past}} that she was pregnant{]]]}.
\trailingcitation{(\citealt[433, ex. 59b, 59c]{Wurmbrand:2014aa})}
\xe
\noindent As Wurmbrand points out, the pregnancy time in the examples
of (\getref{pregnancy1}) may be understood as bound by
the believing/claiming time, a clear sign that the SOT rule has deleted
\textsc{past} in the embedded clause. This is of course not possible
if the infinitival middle clause is understood as containing \textsc{present},
since SOT deletes a lower tense under identity with the most
immediately superordinate tense. Wurmbrand concludes that it is the
\textsc{past} tense of the main clause that triggers deletion of the
\textsc{past} tense of the most embedded clause, and therefore the infinitival
middle clause must be viewed as tenseless.
Once again, however, the exfoliation/reverse engineering approach suggests an alternative. The contrasts in (\getref{eventive2}) show that a hearer cannot posit ancestral \textsc{past} for the infinitival complement of a verb like \textit{believe} or \textit{claim}, where \textsc{past} should survive until LF and receive its normal interpretation. If a hearer were to posit ancestral \textsc{past} in the middle clause of (\getref{pregnancy1}), it could be deleted by the SOT rule (since the tense of the higher clause is also \textsc{past}). When this happens, \textsc{past} in the middle clause will make no contribution of its own to LF interpretation, and will consequently count as a PURE-compatible choice for the hearer reverse-engineering the derivation of the middle clause. On this view it is \textsc{past} in the \textit{middle} clause that triggers SOT deletion of \textsc{past} in the lowest clause (before it itself is deleted), not \textsc{past} in the highest clause. The logic is essentially the same as the logic behind our proposal for (\getfullref{wouldrestriction.b}).\footnote{Wurmbrand once again mentions the possibility that these infinitives might contain a “deleted \textsc{past}” (p. 432, fn. 25), but rejects this possibility as incapable of explaining “why the \textsc{past} must always delete, and how this is {[}im{]}possible {[}\textit{correcting a probable typo}{]} in non-SOT contexts (e.g. \textit{Julia claims to be pregnant} cannot mean `Julia claims that she was pregnant').” In the logical structure of the alternative suggested here, it is PURE that fills this explanatory gap. Undeleted (and unselected) \textsc{past} is not semantically minimal, and therefore cannot be posited by the (obligatorily unambitious) hearer in the process of reverse-engineering the derivation that produced an infinitival complement by exfoliation.}
Wurmbrand notes a related contrast between infinitival complements to verbs like \textit{believe} and \textit{claim} amenable to the same alternative view. In examples like (\getref{pregnancy2}\getref{pregnancy2.a}--\getref{pregnancy2.b}), \textsc{present} embedded under \textsc{past} receives an obligatory \textit{double-access reading}, according to which Julia's pregnancy held at both the believing/claiming time (five years ago) and at utterance time (now), which is biologically impossible. The infinitival complements in (\getref{pregnancy2}\getref{pregnancy2.c}--\getref{pregnancy2.d}), by contrast, do not require a double-access interpretation, and permit the pregnancy time to be identified with the believing/claiming time. I thus cannot assume that these infinitival clauses are derived with any form of \textsc{present}:
\pex<pregnancy2>
\ptxt{Propositional infinitives that do not require double access reading (unlike \textsc{present})}
\a<a> \ljudge\#Five years ago, it was believed that Julia is pregnant.
\a<b> \ljudge\#Five years ago, Julia claimed that she is pregnant.
\a<c> Five years ago, Julia was believed to be pregnant.
\a<d> Five years ago, Julia claimed to be pregnant.
\trailingcitation{(\citealt[432, ex. 58]{Wurmbrand:2014aa})}
\xe
\noindent As before, Wurmbrand concludes that these infinitives are deeply tenseless. Once again, however, the exfoliation/reverse engineering alternative permits these clauses to contain ancestral \textsc{past}. Since this instance of \textsc{past} is deleted by SOT, its presence may be posited in the reverse-engineering process without violating the strictures of PURE. Note that in the end, an infinitival clause that started its life with its tense specified as \textsc{past} ends up tenseless, just as in Wurmbrand's theory. The crucial difference is derivational. I am not proposing that infinitival clauses are intrinsically tenseless. Under the analysis suggested here, some are interpreted as containing \textsc{present}, since that tense is minimal, even though others do end up truly tenseless, thanks to SOT deletion of \textsc{past}.
\pex
\ptxt{Speaker and hearer summary: propositional infinitives under verbs like \textit{believe} and \textit{claim}}\medbreak
\textit{Speaker:} Speaker is free to posit any content whatsoever for T of the embedded clause (as before).\footnote{By ``any content whatsoever'', I mean any content compatible with the rules that govern the speaker's derivations. Thus, for example, as Michelle Sheehan notes, the fact that a verb such as \textit{plan} requires a semantically future complement will impel the speaker to include a form of \textit{woll}. I should also note that some verbs impose post-exfoliation selectional requirements that reject derivations in which exfoliation has not created a nonfinite clause, as discussed in \citet{Pesetsky:2019aa}. Such requirements also restrict the speaker's derivation.}\\
\textit{Hearer (restricted by PURE):}
\a Hearer may not posit a modal because none is selected.
\a Hearer may posit \textsc{present} as the pre-exfoliation tense of T because it is semantically minimal (as we saw in discussing \textsc{aux}-drop) (as before).
\a Hearer may posit \textsc{past} as the pre-exfoliation tense of T so long as it is semantically inert due to SOT (as before).
\xe
\subsection{Why do propositional infinitives show only one side of the factative
effect?}
\label{subsec:An-important-unsolved} I have suggested that \textsc{aux}-drop and infinitival complementation tell a unified story about the effects of PURE. On one important point, however, the two discussions seem to point in different directions. In this section, I will suggest a way to reconcile them, though much remains to be worked out.
The factative effect for \textsc{aux}-drop permits a silenced T to be understood by a hearer as \textsc{past} when the verb phrase it embeds is eventive, but not when it is non-eventive. I suggested in Section \ref{subsec:Reverse-engineering-an-aux-drop} that \textsc{past} is available with an eventive verb phrase precisely because \textsc{present} is independently blocked with eventive predicates (unless they are understood as habitual or generic). For this reason, PURE permits the hearer to reverse-engineer a derivation in which the tense of the unpronounced auxiliary verb in C has the value \textsc{past}. This is the minimally semantically contentful choice compatible with the morphosyntactic environment. Why then do we not find a similar effect with propositional infinitives, where the same logic should permit a \textsc{past} interpretation for the embedded infinitival clauses of examples like (\getref{eventive2}\getref{eventive2.b}--\getref{eventive2.c})?
\pex<puzzle>
\ptxt{\textsc{aux}-drop vs. propositional infinitive \textsc{past}-tense possibilities}
\a
\begingl
\gla You see John yesterday? \nogloss{\tabto{2.8in} (\textsc{aux}\textit{-drop})} //
\glft `Did you see John yesterday' //
\endgl
\a
\begingl
\gla \ljudge*We believed Mary to see John yesterday. \nogloss{(\textit{propositional infinitive})} //
\glft \textit{intended}: `We believed that Mary saw John yesterday.' //
\endgl
\a
\begingl
\gla \ljudge*Sue claimed to see John yesterday. //
\glft \textit{intended}: `Sue claimed that she saw John yesterday.' //
\endgl
\xe
An important clue may lie in a fact pointed out to me by Susi Wurmbrand (personal communication). In propositional infinitives like those under discussion here, simple \textsc{past} can actually be overtly expressed by the use of the auxiliary verb \textit{have} plus the past participle, a combination that is obligatorily interpreted as perfect tense in clauses that remain finite:
\pex<eventive3>
\ptxt{Propositional infinitives in which \textit{have}+participle → \textsc{past}}
\a<a> They believe(d) Mary to have seen John yesterday at 8:00.
\a<b> They claim(ed) to have sung in the shower yesterday at 8:00.
\a<c> They believe(d) [Bill to have known German when they were young].
\a<d> They claim(ed) [to have known German well when they were young].
\xe
\noindent Independent of the puzzle of (\getref{puzzle}), the facts in (\getref{eventive3}) present an additional challenge to an exfoliation approach to non-finite clauses, since they display another unexpected difference in the semantics of finite clauses and their infinitival counterparts. I suggest that solving the puzzle of (\getref{eventive3}) may help solve the puzzle of (\getref{puzzle}) as well.
The nature of the English perfect tense is a hotly debated topic, but it appears that one of the several hypotheses still in the running (recently defended by \citealt{KlechaPerfect}) is the claim that auxiliary \textsc{have-}\textit{en} is a realization of \textsc{past}, yielding present perfect interpretation when the T that selects it is \textsc{present}, and pluperfect interpretation when that T is \textsc{past} (see \citealt[Section 3 (esp. 3.1)]{GrOnn:2021aa} for discussion and summary). Suppose we accept this proposal. We must now ask why \textsc{have-}\textit{en} cannot be used as the sole tense-denoting element in a finite clause, which would incorrectly permit a sentence like \textit{They have seen John} to be understood as a simple \textsc{past}-tense utterance, rather than a perfect. Let us imagine that an English clause must obey the following surface filter on the featural content of T:
\pex<filter>
\ptxt{T-valuation filter}
* T unless specified for \textsc{past} or \textsc{present}.
\xe
\noindent In a clause that contains T throughout the derivation, \textsc{have}-\textit{en} will never be able to serve as the sole bearer of tense. In any such clause, T must be \textsc{past} or \textsc{present} so as to not violate (\getref{filter}).\footnote{In constructions in which \textsc{have}-\textit{en} is embedded under an epistemic modal, its interpretation as \textsc{past} is extremely salient, e.g. \textit{Sue must have seen John} \textit{yesterday at 8:00}. \textsc{Have-}\textit{en} is not the sole bearer of tense here, however. Though \textit{must} does not show a morphologically overt \textsc{present\textasciitilde past} alternation like \textit{can\textasciitilde could}, we may presume that it is specified as \textsc{present}, and that (\getref{filter}) is therefore satisfied. I am grateful to Asia Pietraszko (personal communication) for raising this point. As Athulya Aravind (personal communication) notes, future perfect constructions make the same point even more clearly, e.g. \textit{Sue will have seen John yesterday at 8:00}. Here of course, we have independent evidence that \textit{will} includes a second instance of tense (\textit{woll}\,+\,\textsc{present}); cf. \textit{They claimed that Sue would have seen John by then} (with \textit{woll}\,+\,\textsc{past}).} The combination of T with \textsc{have-}\textit{en} will thus produce the semantics of pluperfect or perfect tense, depending on the value of T chosen.\footnote{The SOT rule does delete the \textsc{past} or \textsc{present} feature of T, and might be understood as producing a violation of the T-valuation filter as stated in (\getref{filter}), but the rule also binds the tense specification of the T that undergoes that rule to that of the T that triggered the rule. I will assume that for this reason a T that undergoes the SOT rule still counts as “specified” and does not violate (\getref{filter}).}
If, however, T is eliminated by exfoliation, then even if it was never valued \textsc{past} or \textsc{present}, it should not produce any sense of deviance: an instance of “salvation by deletion”. Such a derivation will produce no detectable violation of (\getref{filter}) precisely because the T that might have violated the filter is no longer present at the end of the derivation (after exfoliation). This is why \textsc{have}\textit{-en} may be the sole bearer of \textsc{past} in an infinitival clause, explaining the pure \textsc{past}-tense interpretation available to the embedded clauses of (\getref{eventive3}).\footnote{Perfect interpretation is also possible. For example, \textit{Mary lived here for many years} differs from \textit{Mary has lived here for many years} in implying that she no longer lives here, but \textit{I believe Mary to have lived here for many years} permits this reading.}
Returning now to the puzzle in (\getref{puzzle}), we might explain the unavailability of a \textsc{past} interpretation for a propositional infinitive as the result of the hearer's strategy in (\getref{assumptions}):\largerpage
\pex<assumptions>
\ptxt{Constraint on hearer's ability to posit \textsc{past} in an infinitival clause}
Because \textsc{past} can be overtly expressed in an infinitival clause, the hearer will assume that the speaker would have expressed it overtly (using \textsc{have}-\textit{en}) if a \textsc{past} interpretation had been intended, and will therefore never posit \textsc{past} as a value for T in the absence of \textsc{have}-\textit{en}.
\xe
\noindent This proposal conforms to the spirit of PURE, since it continues to enforce unambitiousness when the hearer considers positing unpronounced material, but does not directly follow from it as stated. I leave that as a problem for future work. Crucially, note that (\getref{assumptions}) concerns \textsc{past}-tense \textit{interpretation}, and therefore still does not prevent the hearer from positing a \textsc{past} specification for T that is deleted at LF by the SOT rule, as discussed in preceding sections.\footnote{Interestingly, I do not believe SOT applies to instances of \textsc{past} whose sole exponent is \textsc{have-}\textit{en}. \textit{Mary claimed to have been happy} lacks any reading in which happiness time overlaps claiming time. This too makes (\getref{assumptions}) irrelevant for cases in which I have proposed that the hearer may posit \textsc{past} as a pre-exfoliation value for T without violating PURE because it is deleted by SOT (and thus counts as semantically minimal). Why SOT fails to apply to \textsc{have}-\textit{en} in the first place, however, is unclear to me. Carlo Geraci suggests that SOT might be more generally constrained to apply only across a clause boundary. This would also explain why \textsc{past} T\,+\,\textit{\textsc{have-}}\textit{en}, e.g. \textit{Mary had written the letter already}, can only be understood as a pluperfect, and not a present perfect, as one might expect if the \textsc{past} semantics of \textsc{have-}\textit{en} could be deleted at LF by the SOT rule.}
The most important question facing us now, however, concerns \textsc{aux}-drop. Why doesn't (\getref{assumptions}) prevent the hearer from positing \textsc{past} as an underlying value for T in an \textsc{aux}-drop clause with an eventive predicate, as it does in a propositional infinitive? The answer is in fact straightforward. In \textsc{aux}-drop as analyzed above (building on Fitzpatrick's proposals), T is never deleted. No exponent of T is heard by the hearer, true, but that is not because T has been deleted, but because T-to-C movement has applied and the contents of C were not interpreted by the phonology. In an \textsc{aux}-drop question, T is present throughout the derivation, so no end run around (\getref{filter}) occurs (no “salvation by deletion”). As a consequence, \textsc{have}-\textit{en} can never be the sole bearer of tense in an \textsc{aux}-drop clause, as illustrated by (\getref{perfectauxdrop}).
\pex<perfectauxdrop>
\ptxt{\textsc{have}-\textit{en} in \textsc{aux}-drop yields present perfect meaning only (not \textsc{past})}
\a \ljudge*Mary written that message yesterday at 8:00?\tabto{3in}{(\textit{attempt at} \textsc{past})}
\a Mary written that letter yet?\tabto{3in}{(\textit{present perfect})}
\xe
To summarize: though both entail non-pronunciation of an exponent of tense, \textsc{aux-}drop and infinitivizing instances of exfoliation are quite distinct processes. \textsc{Aux-}drop involves mere non-pronunciation of T in C, while infinitivizing exfoliation involves actual removal of T from the derivation. Their divergent behavior when faced with an eventive predicate, seen in (\getref{puzzle}), follows from this difference. The T of a clause that ends up non-finite may violate filter (\getref{filter}) without incurring any penalty. This in turn makes it possible for \textsc{have-}\textit{en} to produce a clause with simple \textsc{past} semantics, a possibility that prevents the hearer from positing \textsc{past} as the underlying specification for pre-exfoliation T in an infinitival clause without violating PURE, given (\getref{assumptions}). The T of an \textsc{aux-}drop clause is never deleted. Consequently, filter (\getref{filter}) prevents \textsc{have-}\textit{en} from ever being the sole tense in the clause, (\getref{assumptions}) is never invoked, and \textsc{past} interpretation for an eventive VP is compatible with PURE. At the same time, though \textsc{aux-}drop and infinitivizing instances of exfoliation differ in this way, they impose a common burden on the hearer, who is faced in both cases with unpronounced instances of otherwise pronounced structure, hence their core similarity: the fact that \textsc{T} cannot be blithely posited as bearing the value \textsc{past} for a non-eventive predicate, unless later deleted by SOT, but can only be identified as \textsc{present} (or tenseless, when an end run around (\getref{assumptions}) is made possible by exfoliation).\footnote{Should tenselessness outcompete \textsc{present} as a value for T that may be assumed by a hearer reverse-engineering a propositional infinitive? If both count as maximally unambitious possibilities (total absence of value vs. value linked to always-available utterance time), the answer should be no, but some sharpening of the statement of PURE might be necessary.}
\subsection{Predicates imposing simultaneity}
Finally, we must take note of a third class of predicates discussed by Wurmbrand. These take infinitival complements, some of which have propositional semantics, but are fully compatible with eventive predicates and \textsc{past} interpretation of the complement, so long as the selecting predicate is itself \textsc{past} tense.
\pex<simul>
\ptxt{Predicates imposing their reference time on infinitival complement: \textsc{past}}
\a Yesterday, John tried/began . . . /managed . . . to sing (*tomorrow/*next week).
\a The bridge began/seemed to tremble (*tomorrow/*next week).
\trailingcitation{(\citealt[436, ex. 66]{Wurmbrand:2014aa})}
\xe
\noindent Substituting \textsc{present} tense for \textsc{past} eliminates
the possibilities seen in (\getref{simul}):
\pex<simul2>
\ptxt{Predicates imposing their reference time on infinitival complement: \\ \textsc{present}}
\a \ljudge*John seems to sing right now.
\a John seems to know German.
\trailingcitation{(cf. \citealt[437]{Wurmbrand:2014aa})}
\xe
Wurmbrand concludes that in the usage seen in (\getref{simul}), at least, these are “matrix predicates {[}that{]} impose their reference time as the reference time of the embedded infinitive” (p. 437). Once again, she proposes that these infinitival complements are deeply tenseless. Once again, the very fact that the matrix predicate imposes its reference time on the embedded infinitive can be understood as licensing the hearer to posit the corresponding tense specification as part of the pre-exfoliation derivation of the complement clause, as permitted by PURE.\footnote{Wurmbrand also discusses contexts in which predicates such as \textit{seem} behave more like \textit{believe}, which I will not summarize here. I believe the results of this discussion can be incorporated in the alternative advanced in this paper without change.}
\section{Conclusions}
This paper has suggested an alternative to Wurmbrand's \citeyearpar{Wurmbrand:2014aa} analysis of English infinitives as inherently tenseless. This analysis is not merely compatible with the exfoliation approach to infinitivization that I proposed in \citet{Pesetsky:2019aa}, but also helps resolve a paradox lurking in the overall approach: the fact that infinitival clauses did not seem to present a derivational opacity argument for exfoliation from tense semantics parallel to the argument they offer from case morphology in examples like (\getref{predicatefronting}a--b) and (\getref{DATNOMR2}). While \textsc{nom} morphology survives the deletion of its finite T assigner, \textsc{past} tense and modal semantics in T does not.\footnote{There are a number of important elephants in the room, which I have ignored here (in keeping with normal usage of the elephant metaphor). To mention just two that demand immediate attention:
Any instance of morphological syncretism, for example, raises issues for PURE. Why is \textit{the sheep must leave} ambiguous between singular and plural \textit{sheep}, and likewise, why is \textit{They put up with it} ambiguous between \textsc{pres} and \textsc{past}? Material unpronounced as a consequence of a morphological paradigm must somehow be excluded from PURE calculations.
Likewise, the phenomenon of ``surface anaphora'' (\citealt{Hankamer1976a}), whereby certain kinds of ellipsis demand an overt linguistic antecedent, is very much in the spirit of PURE (the antecedent licensing the otherwise non-minimal content posited as underlying the ellipsis), but recent work by \citet{Rudin:2019aa}, among others, has called renewed attention to instances of ellipsis whose interpretation includes material unsupported by the overt antecedent, another challenge for PURE. I thank Peter Grishin (personal communication) for raising this issue.}
If the proposal sketched here is correct, semantics does present a comparable derivational opacity argument in principle, but we are prevented from seeing it clearly by PURE, which prevents us as hearers from attributing non-minimal semantic content to a tense or modal that has been deleted by exfoliation. An additional argument for this approach came from the English \textsc{aux}-drop construction, where PURE resolves a key contradiction arising from Fitzpatrick's otherwise optimal account.
If this style of explanation is fruitful, we should ask whether there are other problems and paradoxes that might be resolved by permitting the class of producible derivations to misalign with the class of reverse-engineerable derivations, as I have proposed in this paper. I have suggested that certain problems might be resolved if certain derivations producible by the speaker may not be reproduced by the hearer. Perhaps other problems might be resolved in the opposite manner, if the reverse engineering process hosted by the hearer permits options that are in fact barred for the speaker. For example, imagine that when the hearer attempts to reproduce the syntactic derivation of the speaker, they are free to ignore EPP features, so that a raised nominal in the speaker's utterance might remain unraised in the reverse-engineered hearer's derivation. In this respect, the hearer's reverse engineering might show some ambition after all, in its reconstruction of the speaker's syntax, if not their semantics. This might be an approach to reconstruction phenomena worth exploring. Conversely, if one imagines that the hearer is free to assume EPP features not present in the speaker's derivation, one might be led to a new view of phenomena normally viewed as covert movement internal to the speaker's syntactic derivation. I will leave the question of whether these are (or are not) promising avenues of investigation open -- a topic, I hope, for future conversation and debate with the dedicatee of this volume.
\section*{Acknowledgments}
For useful discussion, I am grateful to the students in my Fall 2019 graduate seminar at MIT, where I first presented this material, and especially to my co-teacher and colleague Athulya Aravind. I have also benefited from the comments of students and visitors at a January 2020 class at the University of Vienna and the 2021 Winter Virtual New York Institute; from questions following a presentation at the 13th Brussels Conference on Generative Linguistics; and from discussions with Jeroen van Craenenbroeck, Sabine Iatridou, Donca Steriade, Esther Torrego, and Susi Wurmbrand. I am particularly grateful to Carlo Geraci and Michelle Sheehan for valuable comments on an earlier draft which improved the discussion substantially.
A December 2020 consultation on Facebook left me unclear whether ``reverse engineer'' or ``reverse engineerer'' is the better term. Strong views were offered by friends and colleagues on both sides of the question. I hope the dedicatee of this paper is content with my ultimate decision.
{\sloppy\printbibliography[heading=subbibliography,notkeyword=this]}
\end{document}
| {
"alphanum_fraction": 0.7978586642,
"avg_line_length": 169.211038961,
"ext": "tex",
"hexsha": "0447bdb105a929dd85c59242738955d1c1960937",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-08-25T10:17:35.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-08-25T10:17:35.000Z",
"max_forks_repo_head_hexsha": "66c8dd5c84abc081ca335309cc807c75e1df57e5",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "laszabine/323",
"max_forks_repo_path": "chapters/pesetsky.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "66c8dd5c84abc081ca335309cc807c75e1df57e5",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "laszabine/323",
"max_issues_repo_path": "chapters/pesetsky.tex",
"max_line_length": 2680,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "66c8dd5c84abc081ca335309cc807c75e1df57e5",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "laszabine/323",
"max_stars_repo_path": "chapters/pesetsky.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 24939,
"size": 104234
} |
\section{Message Passing Interface}
Message Passing Interface (MPI) is the de facto standard for message passing in high performance computing. It was first released in 1994 as version 1.0, created in a collaboration between 40 different organisations involving about 60 people in total. It was based on a preliminary proposal, known as MPI1, created by Jack Dongarra, Rolf Hempel, Tony Hey, and David Walker, finished in November 1992, and inspired by the then state-of-the-art practice, such as PVM, NX, Express, and p4. It has since been iterated upon, with the latest version, MPI-3.0, released in 2012.
The reason for creating this standard is to ensure portability and ease of use. In a communicating distributed-memory environment, high-level routines and abstractions are often built upon lower-level message-passing routines, and the standardisation ensures that these abstractions and routines are available on all systems. It also presents vendors with a small set of clearly defined base routines that they need to implement efficiently and, if possible, provide hardware support for, in order to enhance scalability.
MPI is designed to allow efficient communication between processes, to be usable in heterogeneous environments, and to ensure thread safety. The semantics of the interface itself should be independent of any language it is implemented in; however, the standard does focus on providing convenient bindings for the C and Fortran languages. MPI also specifies how communication failures in the underlying systems are dealt with.
The standard is designed for message passing only and does not include specifications for explicit operations on shared memory. Operations that go beyond current operating system support are not included; examples are interrupt-driven receives, remote execution and active messages, since the interface is not intended for kernel programming or embedded systems. Nor does it include support for debugging, explicit support for threads, task management, or I/O functions \cite{mpi1}.
\subsection{Message Types}
\subsubsection{Point-to-point Operations}
Point-to-point communication is the basic building block of message passing.
\paragraph{Send}
Here one process sends data, in the form of a message, to a single other process.
\paragraph{Receive}
When a process receives a message, it enqueues the message in a queue called its message box. The messages in the message box are then processed sequentially, dequeued and handled one at a time \cite{mpi1}.
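As a minimal sketch of how these two operations fit together, the following C fragment sends a single integer from rank 0 to rank 1 using the MPI C bindings; the ranks, tag and value are chosen purely for illustration:

\begin{verbatim}
#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[]) {
    int rank, value;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0) {
        value = 42;
        /* send one int to rank 1, using tag 0 */
        MPI_Send(&value, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
    } else if (rank == 1) {
        /* receive one int from rank 0, tag 0 */
        MPI_Recv(&value, 1, MPI_INT, 0, 0, MPI_COMM_WORLD,
                 MPI_STATUS_IGNORE);
        printf("rank 1 received %d\n", value);
    }

    MPI_Finalize();
    return 0;
}
\end{verbatim}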
\subsubsection{Collective Operations}
\paragraph{Broadcast}
Broadcast is a one-to-many operation, where one process has some specific data that it sends to many other processes. The data is therefore multiplied, as opposed to being divided.
\paragraph{Scatter}
Scatter is a close relative to broadcast. It is also a one-to-many operation, but here the data is divided into equally large chunks and is distributed among multiple processes (including the process originally containing all the data). This could for instance be sending all entries in a collection to different processes that individually process their part.
\paragraph{Gather}
Gather is a many-to-one operation and is the inverse of scatter. Here data from many processes are sent to one of them. This operation often implies a hierarchy of the processes containing the data, where the process highest in the hierarchy receives all the data (also from itself).
\paragraph{Reduce}
Reduce is a many-to-one operation. Here one operation, called the reduction function, is done on data from multiple processes and the result is placed in one of them. As with gather, a hierarchy is also customary and the process highest in the hierarchy receives the result of the reduction. All reduction functions must be both associative and commutative, so results can be reduced without concern for the order of operations \cite{mpi1}.
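As a minimal sketch of how scatter and reduce can be combined, the following C program distributes an array from a root process, lets every process sum its own chunk, and reduces the partial sums back to the root with \texttt{MPI\_SUM}; the chunk size and array contents are invented for the example:

\begin{verbatim}
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

#define CHUNK 4   /* elements handled by each process (example value) */

int main(int argc, char *argv[]) {
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int *data = NULL;
    if (rank == 0) {
        /* the root owns the full array: 0, 1, 2, ... */
        data = malloc(size * CHUNK * sizeof(int));
        for (int i = 0; i < size * CHUNK; i++)
            data[i] = i;
    }

    /* scatter: every process, including the root, gets CHUNK elements */
    int chunk[CHUNK];
    MPI_Scatter(data, CHUNK, MPI_INT, chunk, CHUNK, MPI_INT,
                0, MPI_COMM_WORLD);

    /* each process computes a partial sum of its own chunk */
    int partial = 0;
    for (int i = 0; i < CHUNK; i++)
        partial += chunk[i];

    /* reduce: the partial sums are combined with MPI_SUM at the root */
    int total = 0;
    MPI_Reduce(&partial, &total, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        printf("total sum: %d\n", total);
        free(data);
    }
    MPI_Finalize();
    return 0;
}
\end{verbatim}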
\subsubsection{System Buffer}
Consider the following: when sending messages, what happens if the receiver is not ready to process them? To solve this problem, MPI dictates that an implementation must be able to store messages; however, the way this is done is up to the implementer.
One way to do this is with a system buffer. In short, a system buffer works as an inbox, and sometimes also an outbox, where messages are stored until they can be processed. A few things should be noted about this buffer: it is supposed to work ``behind the scenes'' and is not manageable by the programmer. What the programmer does need to realise, however, is that this buffer is a finite resource, which will be exhausted if one is not cautious \cite{compLLNLMPI}.
\subsubsection{Blocking and Non-blocking Sends}
Messages can be divided into two distinct groups: blocking sends and non-blocking sends. The straightforward approach is the non-blocking send, where the sender assumes, or is certain, that the receiver is ready to handle the message. These calls return almost immediately, but there is no guarantee that the message has been received, or how it was received.
On the other hand there is the blocking send. This only returns after it is safe to modify the application buffer again. The sent data could still be sitting in a buffer, but it is considered safe.
Generally, blocking sends are used for cases where it is crucial that the communication has been completed, and non-blocking sends are used to increase performance in cases where the communication is not crucial \cite{compLLNLMPI}.
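The difference can be sketched as follows (illustration only, not taken from the cited sources): the non-blocking \texttt{MPI\_Isend} returns a request object almost immediately, and the application buffer may only be reused after the matching \texttt{MPI\_Wait}:
\begin{verbatim}
// Minimal sketch of a non-blocking send paired with a blocking receive.
#include <mpi.h>
#include <cstdio>

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int value = 7;
    if (rank == 0) {
        MPI_Request request;
        // Returns almost immediately; 'value' must not be modified yet.
        MPI_Isend(&value, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &request);
        // ... other work could be overlapped with the transfer here ...
        // Only after MPI_Wait is it safe to reuse the application buffer.
        MPI_Wait(&request, MPI_STATUS_IGNORE);
    } else if (rank == 1) {
        MPI_Recv(&value, 1, MPI_INT, 0, 0, MPI_COMM_WORLD,
                 MPI_STATUS_IGNORE);
        std::printf("rank 1 received %d\n", value);
    }

    MPI_Finalize();
    return 0;
}
\end{verbatim}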
\subsubsection{Order and Fairness}
When sending messages, the order in which they are sent and received can matter a great deal. MPI gives a few guarantees as to how this can be expected to happen. Note, however, that these rules are not enforced if multiple threads participate in the communication.
When using MPI, messages will not overtake each other. If one task sends two messages in succession to the same receiver, the message sent first will be the first one to be received. Likewise, if a receiver has multiple requests looking for a matching message, the request that was made first will get the first match.
MPI does not give any guarantees related to fairness. It is entirely possible to starve tasks; if this is not desired, the responsibility lies with the programmer \cite{compLLNLMPI}.
| {
"alphanum_fraction": 0.8093883357,
"avg_line_length": 126.54,
"ext": "tex",
"hexsha": "a9721faf0e9927311c1b780e1225a2a09b904fb9",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2016-04-12T20:49:43.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-04-12T20:49:43.000Z",
"max_forks_repo_head_hexsha": "1fb4ce407174224efce92aa3ee5e1ac5704a307b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "simonvandel/P4",
"max_forks_repo_path": "Report/Analysis/MPI/MPI.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "1fb4ce407174224efce92aa3ee5e1ac5704a307b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "simonvandel/P4",
"max_issues_repo_path": "Report/Analysis/MPI/MPI.tex",
"max_line_length": 608,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "1fb4ce407174224efce92aa3ee5e1ac5704a307b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "simonvandel/TLDR",
"max_stars_repo_path": "Report/Analysis/MPI/MPI.tex",
"max_stars_repo_stars_event_max_datetime": "2015-02-18T13:38:49.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-02-18T13:38:49.000Z",
"num_tokens": 1266,
"size": 6327
} |
\chapter{Interoperation}
\label{chap:interop}
| {
"alphanum_fraction": 0.7708333333,
"avg_line_length": 9.6,
"ext": "tex",
"hexsha": "9b898a5ff6dd36e5cd274f4bdfa7cfad500c3c89",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2c1ef1c673cb22b1ee2739da4ac9d63273a90a31",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "StanfordLegion/legion-manual",
"max_forks_repo_path": "interop.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2c1ef1c673cb22b1ee2739da4ac9d63273a90a31",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "StanfordLegion/legion-manual",
"max_issues_repo_path": "interop.tex",
"max_line_length": 24,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "2c1ef1c673cb22b1ee2739da4ac9d63273a90a31",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "StanfordLegion/legion-manual",
"max_stars_repo_path": "interop.tex",
"max_stars_repo_stars_event_max_datetime": "2021-12-05T19:03:26.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-03-21T16:09:53.000Z",
"num_tokens": 13,
"size": 48
} |
\documentclass[main.tex]{subfiles}
\begin{document}
\section{Applications} \label{sec:applications}
% Definitions from \cite[]{matarresePathintegralApproachLargescale1986}.
% The second connected correlation function, computed with a smoothing at a scale \(R\), can be used to define a variance: \(\xi^{(2)} (x, x) \sim \sigma _R^2\).
% With this we can then quantify ``peaks'' in the density perturbation \(\delta (x) = (\rho - \expval{\rho }) / \expval{\rho }\).
% Specifically, we define a function \(\rho_{\nu , R}\) so that it is equal to \(1\) when the perturbation is \(\nu \) sigmas above the average (which is \(\delta = 0\) by definition), and \(0\) otherwise:
% %
% \begin{align}
% \rho_{\nu , R} = \Theta (\delta (x) - \nu \sigma _R)
% \,.
% \end{align}
% We can define an \(N\)-point correlation for this ``boolean density'':
% %
% \begin{align}
% \Pi^{(N)}_{\nu , R} (x_1 \dots x_N) = \expval{\prod_{r=1}^{N} \rho_{\nu , R}(x_r)}
% \,
% \end{align}
% %
% quantifies the probability that the perturbation is above the threshold at all the positions \(x_r\).
% We then define the \emph{peak} correlation function
% %
% \begin{align}
% \xi^{(N)}_{\text{dis}, \nu , R} = \expval{\frac{\prod_{r=1}^{N} \rho_{\nu , R} (x_r)}{\expval{\rho_{\nu , R}}^{N} }} - 1
% \,.
% \end{align}
% % \todo[inline]{understand its physical meaning}
% According to equations 16 and 22 in \cite[]{matarresePathintegralApproachLargescale1986} the two-point correlation function with two different smoothing scales and thresholds is allowed to have zero-crossings.
% \todo[inline]{What does \(\xi_{cc} \propto \xi_{gg}\) mean? These are the two-point correlation functions for rich cluster and galaxies. Do they correspond to \(\xi^{(N)}_{\text{dis}, \nu, R}\) for different values of \(\nu \)? What is the connection between this observable quantity and the mathematical formalism of peak correlation functions?}
% The paper also finds \emph{scaling relations} between a certain order \(N\)-point peak correlation function and lower order ones, as well as the background correlation functions \cite[eqs.\ 24--25]{matarresePathintegralApproachLargescale1986}.
% The result is interesting since it is general, allowing for the computation to be performed with a general non-Gaussian background; in the Gaussian case it reduces to
%
% \begin{align}
% \zeta (1, 2, 3) &= \xi (1, 2) \xi (2, 3) + \xi (1, 3) \xi (2, 3) + \xi (1, 2) \xi (1, 3) + \xi (1, 2) \xi (2, 3) \xi (1, 3) \\
% \zeta &\sim \sum \xi^2 + \xi^3
% \,,
% \end{align}
%
% where \(\zeta \) is the three-point peak correlation function, \(\xi \)is the two point one, and we denote \(x_i \equiv i\) for compactness.
% The second expression is just a compact form for the first.
%
% This expression does not seem to fit observational data: specifically, the \(\xi^3\) term has not been found in observations.
% The aforementioned general expression can be written as
% %
% \begin{align}
% \zeta = F \qty(\sum \xi^2 + \xi ^3) + (F-1) \qty(1 + \sum \xi )
% \,,
% \end{align}
% %
% where \(F = F(1, 2, 3) = 1\) in the Gaussian case.
% \todo[inline]{I do not understand the motivation behind equation 28 in the same paper? Is it meant to make it so the \(\xi^3\) term vanishes? }
% \todo[inline]{Is the \(\xi^3\) term predicted by Gaussian statistics but not observed?}
% \todo[inline]{I don't see where \(\overline{f}(r) = \expval{f(r) | \Gamma } = \xi _i (r) \xi^{-1}_{ij} c_j\) comes from in general --- I can see it in the specific case \(C_i = f(r_i)\) through.}
% They account for non-gaussianity through field (overdensity or gravitational potential) in the form \(\alpha \phi + \epsilon (\phi^2 - \expval{\phi^2 }) \), where \(\phi \) is Gaussian with zero mean.
% They use the Press–Schechter formalism, considering the regions in which the linearized density perturbation goes beyond \(\Delta _c \approx 1.687\).
% Figure 4: the effect of adding the non-Gaussian tails is to boost by an order of magnitude the probability \(\mathbb{P}(> \delta _c | z_c, R)\), where \(z_c\) is the redshift we are considering while \(R\) is the filtering scale.
% Equation 4.25 in \textcite[]{verdeMultivariateJointPDF2013} is a calculation tool: it allows us to calculate the average of an observable \(A\) for a non-Gaussian random field in a discretized space.
% \textcite[]{planckcollaborationPlanck2018Results2019}: I guess I can interpret the PDF for the nongaussianity parameter, but not much else\dots
% \textcite[]{celoriaPrimordialNonGaussianity2018}: is EEE, TTT and such related to the three-point functions computed with different polarizations?
\end{document}
| {
"alphanum_fraction": 0.6891659501,
"avg_line_length": 56.7317073171,
"ext": "tex",
"hexsha": "55a2247b8ef88aaad9ad07077752fb5a89f8636f",
"lang": "TeX",
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2021-08-06T16:11:07.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-10-03T16:20:19.000Z",
"max_forks_repo_head_hexsha": "805ebe1be49bbd14c6b46b24055f9fc7d1cd2586",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "jacopok/notes",
"max_forks_repo_path": "ap_second_semester/path_integral/applications.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "805ebe1be49bbd14c6b46b24055f9fc7d1cd2586",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "jacopok/notes",
"max_issues_repo_path": "ap_second_semester/path_integral/applications.tex",
"max_line_length": 348,
"max_stars_count": 6,
"max_stars_repo_head_hexsha": "805ebe1be49bbd14c6b46b24055f9fc7d1cd2586",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "jacopok/notes",
"max_stars_repo_path": "ap_second_semester/path_integral/applications.tex",
"max_stars_repo_stars_event_max_datetime": "2022-01-13T14:52:50.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-10-10T13:10:57.000Z",
"num_tokens": 1430,
"size": 4652
} |
\documentclass[11pt,a4paper]{article}
% These are extra packages that you might need for writing the equations:
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{booktabs}
\usepackage{hyperref}
\usepackage{listings}
\usepackage{xcolor}
\usepackage{outlines}
\usepackage{mathtools}
\lstset {language=C++,
basicstyle=\ttfamily,
keywordstyle=\color{blue}\ttfamily,
stringstyle=\color{red}\ttfamily,
commentstyle=\color{purple}\ttfamily,
morecomment=[l][\color{magenta}]{\#},
basicstyle=\tiny}
% You need the following package in order to include figures in your report:
\usepackage{graphicx}
% With this package you can set the size of the margins manually:
\usepackage[left=2cm,right=2cm,top=2cm,bottom=2cm]{geometry}
\renewcommand{\vec}[1]{\mathbf{#1}}
\newcommand{\norm}[1]{\left\lVert#1\right\rVert}
\begin{document}
% Enter the exercise number, your name and date here:
\noindent\parbox{\linewidth}{
\parbox{.25\linewidth}{ \large ICP, Exercise 11 }\hfill
\parbox{.5\linewidth}{\begin{center} \large Beat Hubmann \end{center}}\hfill
\parbox{.2\linewidth}{\begin{flushright} \large Dec 07, 2018 \end{flushright}}
}
\noindent\rule{\linewidth}{2pt}
\section{Introduction}
The Jacobi relaxation method and the Gauss-Seidel method were implemented to solve
the discretized Poisson equation in two dimensions for point charges in a grounded box.
The two methods were compared with each other in terms of time to solution and number of iterations required.
Also, the time to solution was compared to that of a high-performance library Cholesky solver.
\section{Algorithm Description}
For all cases, the two-dimensional Poisson equation (equation~\ref{eqn:1}) on $\Omega$ is
discretized using second-order central finite differences in both the x- and the y-direction (equation~\ref{eqn:2}).
Both axes share a common grid spacing of $\Delta x= \frac{1}{N+1}$, where $N$ is the number of interior points
per axis direction on the grid.
Following the established finite difference procedure of natural ordering, the left-hand side of equation~\ref{eqn:2} can then be written
in the form of an $N^2 \times N^2$ matrix $\vec{A}$, while the charge density values $\rho$ on the grid get unrolled into a vector $\vec{b}$ of size $N^2$ on the right-hand side (equation~\ref{eqn:3}).\\
The resulting matrix $A$ is both sparse and block tridiagonal.
\begin{equation}
\Delta \Phi = -\rho \quad \text{on}\ \Omega = (0, 1) \times (0,1)
\label{eqn:1}
\end{equation}
\begin{equation}
4x_{i,j} - x_{i-1, j} - x_{i+1, j} - x_{i, j-1} - x_{i, j+1} = (\Delta x)^2 \cdot \rho(x_{i,j})
\label{eqn:2}
\end{equation}
\begin{equation}
Ax = b
\label{eqn:3}
\end{equation}
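As an illustration of this assembly (a plain \texttt{std::vector}-based sketch, not the Eigen-based code submitted with the report; the function name and layout are chosen here for exposition), the dense matrix $\vec{A}$ and the vector $\vec{b}$ can be built in natural ordering as follows. Boundary points of the grounded box carry $\Phi = 0$ and therefore drop out of the stencil.
\begin{lstlisting}
// Illustrative sketch: assemble the dense N*N x N*N Poisson matrix A and
// the right-hand side b in natural ordering; rho holds N*N charge values.
#include <vector>

void assemble_poisson(int N, const std::vector<double>& rho,
                      std::vector<std::vector<double>>& A,
                      std::vector<double>& b)
{
    const int M = N * N;
    const double dx = 1.0 / (N + 1);
    A.assign(M, std::vector<double>(M, 0.0));
    b.assign(M, 0.0);
    for (int i = 0; i < N; ++i) {          // grid row
        for (int j = 0; j < N; ++j) {      // grid column
            const int k = i * N + j;       // natural ordering index
            A[k][k] = 4.0;                 // centre coefficient
            if (i > 0)     A[k][k - N] = -1.0;  // neighbour in previous row
            if (i < N - 1) A[k][k + N] = -1.0;  // neighbour in next row
            if (j > 0)     A[k][k - 1] = -1.0;  // left neighbour
            if (j < N - 1) A[k][k + 1] = -1.0;  // right neighbour
            b[k] = dx * dx * rho[k];       // scaled charge density
        }
    }
}
\end{lstlisting}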
\subsection{Jacobi relaxation method}
In matrix form, the Jacobi method works by decomposing the matrix $\vec{A}$ into a matrix $\vec{D}$ consisting only of $\vec{A}$'s main diagonal
and a remainder matrix $\vec{R} = \vec{A} - \vec{D}$. Starting with an initial guess $\vec{x}^{(0)} = (1, 1, \ldots, 1)^{\text{T}}$, we iterate $\vec{x}^{(t+1)} = \vec{D}^{-1}(\vec{b}- \vec{R}\vec{x}^{(t)})$
until $\norm{\vec{x}^{(t+1)} - \vec{x}^{(t)}}$ becomes smaller than a chosen convergence threshold in a chosen norm.
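Purely to illustrate the update rule (the submitted program uses Eigen types; the function name, tolerance and iteration cap below are chosen for exposition), one possible Jacobi loop is:
\begin{lstlisting}
// Illustrative sketch of the Jacobi iteration x_new = D^{-1} (b - R x).
// Iterates until the 2-norm of the update is below tol or max_iter is
// reached; returns the number of iterations performed.
#include <cmath>
#include <vector>

int jacobi(const std::vector<std::vector<double>>& A,
           const std::vector<double>& b,
           std::vector<double>& x,
           double tol = 1e-4, int max_iter = 100000)
{
    const std::size_t n = b.size();
    std::vector<double> x_new(n, 0.0);
    for (int t = 1; t <= max_iter; ++t) {
        double diff2 = 0.0;
        for (std::size_t i = 0; i < n; ++i) {
            double sum = 0.0;                    // off-diagonal part (R x)_i
            for (std::size_t j = 0; j < n; ++j)
                if (j != i) sum += A[i][j] * x[j];
            x_new[i] = (b[i] - sum) / A[i][i];   // divide by the diagonal D
            diff2 += (x_new[i] - x[i]) * (x_new[i] - x[i]);
        }
        x = x_new;
        if (std::sqrt(diff2) < tol) return t;    // convergence threshold met
    }
    return max_iter;
}
\end{lstlisting}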
\subsection{Gauss-Seidel method}
Similar to the Jacobi relaxation method, the matrix $\vec{A}$ is decomposed into a lower triangular matrix $\vec{L}$ and a strictly upper triangular matrix $\vec{U}$ such that
$\vec{A} = \vec{L} + \vec{U}$. Completely analogous to the Jacobi relaxation method, the iteration step is $\vec{x}^{(t+1)} = \vec{L}^{-1}(\vec{b}- \vec{U}\vec{x}^{(t)})$.
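The corresponding Gauss-Seidel sweep differs only in that updated entries of $\vec{x}$ are reused immediately within the same sweep, which is what the forward substitution with $\vec{L}$ amounts to. A sketch, again for illustration only and not the submitted Eigen-based code:
\begin{lstlisting}
// Illustrative sketch of one Gauss-Seidel sweep: computes
// x = L^{-1} (b - U x) in place by forward substitution.
#include <vector>

void gauss_seidel_sweep(const std::vector<std::vector<double>>& A,
                        const std::vector<double>& b,
                        std::vector<double>& x)
{
    const std::size_t n = b.size();
    for (std::size_t i = 0; i < n; ++i) {
        double sum = 0.0;
        for (std::size_t j = 0; j < n; ++j)
            if (j != i) sum += A[i][j] * x[j];  // x[j] for j < i is already updated
        x[i] = (b[i] - sum) / A[i][i];
    }
}
\end{lstlisting}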
\section{Results}
The program was implemented as described above and submitted with this report. \\
Both methods reached the set convergence threshold of $\norm{\vec{x}^{(t+1)} - \vec{x}^{(t)}}_{2} \le 10^{-4}$. \\
The Jacobi relaxation method took $t=3478$ iterations but on average only $\sim 45 \text{ms}$ to do so, while the Gauss-Seidel method
only took $t=1922$ iterations but $\sim 6400 \text{ms}$ to reach the same threshold. For comparison, Eigen's optimized library Cholesky
solver obtained the reference solution in $9 \text{ms}$.\\
Both methods' solutions reached similar deviation from the reference Cholesky solution:\\
$\norm{\vec{x}^*_{\text{Jacobi}} - \vec{x}^*_{\text{Cholesky}}}_{2} \approxeq \norm{\vec{x}^*_{\text{Gauss-Seidel}} - \vec{x}^*_{\text{Cholesky}}}_{2} \approxeq 0.05$.
The heat maps for all three solvers are shown in figures~\ref{fig:1}, \ref{fig:2} and~\ref{fig:3}.
\begin{figure}[ht]
\begin{center}
\includegraphics[scale=1.2]{figure_1.png}
\end{center}
\caption{Cholesky solver reference solution for Poisson equation with point charges at $(0.25, 0.75)$, $(0.75, 0.25)$.}
\label{fig:1}
\end{figure}
\begin{figure}[ht]
\begin{center}
\includegraphics[scale=1.2]{figure_2.png}
\end{center}
\caption{Jacobi relaxation solver solution for Poisson equation with point charges at $(0.25, 0.75)$, $(0.75, 0.25)$.}
\label{fig:2}
\end{figure}
\begin{figure}[ht]
\begin{center}
\includegraphics[scale=1.2]{figure_3.png}
\end{center}
\caption{Gauss-Seidel solver solution for Poisson equation with point charges at $(0.25, 0.75)$, $(0.75, 0.25)$.}
\label{fig:3}
\end{figure}
\section{Discussion}
The results are as expected. Further consideration should be given to investigating why the Gauss-Seidel solver
in this implementation is roughly two orders of magnitude slower than the Jacobi relaxation solver.
%\begin{thebibliography}{99}
% \bibitem{metropolis}
% Metropolis, N.,
% Rosenbluth, A.W.,
% Rosenbluth, M.N.,
% Teller, A.H.,
% Teller, E.\\
% \emph{Equations of State Calculations by Fast Computing Machines},\\
% Journal of Chemical Physics. 21 (6): 1087,\\
% 1953.
% \bibitem{herrmann}
% Herrmann, H. J.,
% Singer, H. M.,
% Mueller L.,
% Buchmann, M.-A.,\\
% \emph{Introduction to Computational Physics - Lecture Notes},\\
% ETH Zurich,\\
% 2017.
% \bibitem{Gottschling}
% Gottschling, Peter\\
% \emph{Discovering Modern C++},\\
% Addison-Wesley,\\
% 2016.
%\end{thebibliography}
\end{document} | {
"alphanum_fraction": 0.7089920949,
"avg_line_length": 36.8,
"ext": "tex",
"hexsha": "b4fb3d3a638e995627ca5fd4039137a54357d176",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2ad1bcef73f3f43d832031cf45c4909341176ebd",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "BeatHubmann/18H-ICP",
"max_forks_repo_path": "ex11/ex11_report.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2ad1bcef73f3f43d832031cf45c4909341176ebd",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "BeatHubmann/18H-ICP",
"max_issues_repo_path": "ex11/ex11_report.tex",
"max_line_length": 205,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "2ad1bcef73f3f43d832031cf45c4909341176ebd",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "BeatHubmann/18H-ICP",
"max_stars_repo_path": "ex11/ex11_report.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1928,
"size": 6072
} |
\documentclass[12pt]{article}
\title{On the Connection between Reading and Writing}
\author{Robert Krency}
\date{\today}
\usepackage[utf8]{inputenc}
\usepackage{amsmath}
\usepackage{graphicx}
\usepackage{tabularx}
% Geometry
\usepackage{geometry}
\geometry{letterpaper, left=1in, top=1in, right=1in, bottom=1in}
% Add vertical spacing to tables
\renewcommand{\arraystretch}{1.4}
% Use Double Spacing for the paper
\usepackage{setspace}
\doublespacing
% Add space between columns
\setlength\columnsep{30pt}
% Begin Document
\begin{document}
\section{The Writing Process}
The writing process consists of the following steps, which tend to overlap and be repeated in varying orders:
\begin{tabular}{c | c}
Active Reading & Active Writing \\ \hline
Pre-Reading & Planning \\
Close Reading & Drafting \\
Post Reading & Revising \\
\end{tabular}
\subsection{Pre-Reading}
Prereading leads us to ask questions: Who? What? When? Where? Why? How?
Questions lead us to answers, which lead to more questions and form the foundation of a written work.
\subsubsection{Example: Camping Out by Hemingway}
\begin{itemize}
\item Who? Primarily men
\item What? Camping
\item Where? Newspaper in Toronto
\item When? June, 1920
\item Why?
\item How?
\end{itemize}
When first skimming a book, what sections are important to recognize?
The \textbf{index} is a reference to find sections in the text where a topic is discussed.
\subsection{Close Reading \& Post Reading}
Do our perceptions of the work change as we read?
What do we understand or change during later readings?
When marking up a paper, leave notes on why certain bits are marked.
Why did I underline this?
Write a summary when finished reading.
Consider the three facets:
\begin{enumerate}
\item Audience
\item Purpose
\begin{itemize}
\item to Persuade
\item to Inform
\item to Entertain
\end{itemize}
\item Tone
\end{enumerate}
\section{Active Writing}
\subsection{Planning}
\begin{itemize}
\item Brainstorm
\item Freewrite
\item Outline
\item Map
\item Cluster
\end{itemize}
\pagebreak
\subsection{Example: Choose your favorite word}
\subsubsection{Brainstorming}
\begin{itemize}
\item Does this mean the word/phrase itself?
\begin{itemize}
\item Bookkeeper, Strength
\end{itemize}
\item Does it represent its concept?
\begin{itemize}
\item Palindromes
\end{itemize}
\item Catchphrase?
\begin{itemize}
\item Everybody panic!
\end{itemize}
\end{itemize}
\subsubsection{Freewriting}
The concept of a favorite word is an odd idea.
The concept itself presupposes that a word is more than utilitarian, that it might be sentimental or have meaning to a specific person.
This brings to mind then three different questions relating to the idea of a favorite word.
The first of these questions is: is the word itself, free of context and only equipped with its dictionary meaning, a favorite?
Certain qualities that make a word stand out amongst its peers would be perhaps the spelling or an idea related to its structure.
Perhaps the word has certain meaning or sentimental value, as though it was a shared secret passphrase between siblings.
For instance, ``bookkeeper'' has the odd distinction of 3 repeated pairs of characters in a row.
Its variants are the only such words that demonstrate this feature.
\begin{itemize}
\item Notable bits to reuse:
\begin{itemize}
\item concept itself presupposes there exist features that make a word standout
\item the word itself, free of context and only equipped with its dictionary meaning
\item ``bookkeeper'' and its variants
\item shared secret passphrase
\end{itemize}
\end{itemize}
\subsubsection{Write an informative paragraph}
Is it possible to favor one word over any other?
Words are equipped only with a dictionary meaning, lacking any context.
The concept of a favorite word then presupposes that there exist features that make a word worthy above its peers.
For instance, the structure of a word can bend the attention of a curious sort such as myself.
The word ``bookkeep'', and its variants, is notable for its three pairs of repeated characters uninterrupted.
To my knowledge, this is the only such instance of this pattern in the English language.
\subsubsection{Write a persuasive paragraph}
Having a favorite word may seem an odd idea at first glance.
However, its usage can be of great importance, perhaps as a great conversation piece or shared secret passphrase.
Take the word ``bookkeep'' for instance.
The structure of this word, and its variants, is unique amongst the modern English language.
It features three pairs of characters uninterrupted, filling this writer's ideas of the previously shared scenarios.
The structure can lead to an interesting conversation about finding similar words with repeated characters.
Or perhaps its blandness makes it interesting for a secret passphrase, as it does not stand out amongst funny sounding words such as boondoggle that cannot be easily placed in everyday conversation.
\subsubsection{Write an entertaining paragraph}
Travel back to your youth, a time of freedom and fantasy.
For many, a secret clubhouse or treehouse often marked a rite of passage amongst peers or older siblings.
And of course, every secret clubhouse needs a secret passphrase.
This favored word then would need to be instantly recognizable to those in the know, and unassuming to those not.
The word ``bookkeep'' for instance is easily slotted into bland conversation that makes most slip away to their mental happy place.
However, its structure of three repeated characters uninterrupted makes it an interesting candidate.
This writer imagines a challenge to join a treehouse society, in which a riddle can be concocted.
``Three pairs are we, two vowels of secrecy, intruded by \text{?`}Que? ''
\subsubsection{Read all three paragraphs. Describe which one you like best and why.}
I have to pat myself on the back for that little riddle; though it is rather difficult.
The informative paragraph is the most direct in discussing the word.
The idea of a favorite word is a personal choice, as are favorites by nature, thus making a persuasive argument somewhat futile.
An entertaining paragraph is more difficult, but a story about using the word is interesting.
Overall, I think the informative paragraph suits the word better as the concept of the favorite word is defined based on its structure,
the personal meaning being indirect as it requires the predisposition towards patterns.
\subsubsection{Why did you choose the word that you did? What value did you find the assignment?}
The structure of the word makes it unique with its pattern of three pairs of characters uninterrupted.
This assignment is a good demonstration of the preconceived notions the writer has about what makes up a different audience and how to write to each.
Writing informatively, persuasively, or entertainingly requires different structure and tone.
A single topic can be written about in each of the styles with varying outcomes, perhaps some more effective than others.
\section{Ben Healy: ``Gossip is Good''}
\begin{enumerate}
\item List two things about the author from the introduction.
\begin{itemize}
\item Illinois, Brown University and U of Texas
\item Editor for the Atlantic
\end{itemize}
\item What is the author's purpose?
\begin{itemize}
\item to persuade the audience that research shows that gossip is beneficial, against ages-old public sentiment and ``proper manners''
\end{itemize}
\item What is the author's tone?
\begin{itemize}
\item Informational, yet light
\end{itemize}
\item List two people Healy mentions in his essay
\begin{itemize}
\item Blythe Holbrooke - journalist that jokingly described a mathematical formula for how much gossip spreads
\item Robin Dunbar - evolutionary psychology plays a major role in making gossip a foundation of shared identity
\end{itemize}
\item List two universities that Healy mentions in his essay
\begin{itemize}
\item University of Oklahoma, U of Texas
\item Stanford and Berkeley
\end{itemize}
\item Provide a brief summary of the essay.
\begin{itemize}
\item Healy goes against the grain of ``common sense'' that gossip is only detrimental to society.
Citing a hefty amount of research done by top institutions, such as U of Oklahoma and U of Texas, Healy shows how gossip can be helpful.
Gossip, as defined by the author to be negative interpersonal conversation about absent parties, makes up nearly two-thirds of conversation.
One of the benefits Healy brings from the study includes strengthening connections between two people when common negative connections are shared.
Another benefit was that participants were more reflective of themselves.
Robin Dunbar brings in the viewpoint that gossip was a feature of evolutionary psychology, helping people gain a sense of shared identity.
\end{itemize}
\end{enumerate}
\pagebreak
\subsection{The Best Meal}
The best recent meal I had was that Prime Rib at By George last Saturday.
Just thinking about it gets my mouth watering.
While the potato pairing was subpar, the appetizer, main course, and dessert were some of the best I've ever had; the company was pretty good too, I suppose.
I tried something new with the salad, the Greek vinaigrette.
Surprisingly really good.
The meat though\ldots that was perfection.
It was cooked beautifully, nice and pink.
The au jus was a perfect temperature, and not overly salty, with just a proper amount on top when served.
The ratio of meat to fat was amazing, and the seasoning on the crust was not overpowering.
It even came with a little sprig of greenery poking out for ambience.
Then there was the vanilla bean cheesecake.
Utterly the best cheesecake I've ever had.
It was as creamy as could be.
The crust was the perfect sizing, and never overpowered the taste of the cheesecake.
This delicious meal that was in front of me was a birthday present from my father and stepmother.
It has been a long time since I've eaten a meal better, and I absolutely will be going back.
George, you have yourself a new fan.
\end{document} | {
"alphanum_fraction": 0.7625692984,
"avg_line_length": 40.2384615385,
"ext": "tex",
"hexsha": "d37806cb606481fac354baae42a9610903f4ce16",
"lang": "TeX",
"max_forks_count": 4,
"max_forks_repo_forks_event_max_datetime": "2022-02-23T21:09:09.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-01-28T19:39:27.000Z",
"max_forks_repo_head_hexsha": "2ca2b95d3a502551870fe3203ba4e97969d2835f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Anthony91501/uwp-2022-spring",
"max_forks_repo_path": "ENG 101/Notes/notes.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "2ca2b95d3a502551870fe3203ba4e97969d2835f",
"max_issues_repo_issues_event_max_datetime": "2022-01-31T17:51:39.000Z",
"max_issues_repo_issues_event_min_datetime": "2022-01-29T01:30:06.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Anthony91501/uwp-2022-spring",
"max_issues_repo_path": "ENG 101/Notes/notes.tex",
"max_line_length": 198,
"max_stars_count": 4,
"max_stars_repo_head_hexsha": "2ca2b95d3a502551870fe3203ba4e97969d2835f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Anthony91501/uwp-2022-spring",
"max_stars_repo_path": "ENG 101/Notes/notes.tex",
"max_stars_repo_stars_event_max_datetime": "2022-02-22T05:53:56.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-01-15T21:17:08.000Z",
"num_tokens": 2338,
"size": 10462
} |
\documentclass{beamer}
\usetheme{metropolis}
\usepackage[utf8]{inputenc} % Input-indkodning af tegnsaet (UTF8)
\usepackage[T1]{fontenc} % Output-indkodning af tegnsaet (T1)
\usepackage[danish, english]{babel} % Dokumentets sprog
\usepackage{pgf}
\usepackage{tikz}
\usetikzlibrary{arrows,automata}
\title{On dDOS Attack}
\author{Lars Nielsen}
\begin{document}
\maketitle
\section{What is a dDOS Attack}
\begin{frame}{What is a dDOS Attack}
\begin{itemize}
    \item It is a non-destructive cyberattack
\end{itemize}
\end{frame}
\begin{frame}[noframenumbering]{What is a dDOS Attack}
\begin{itemize}
    \item It is a non-destructive cyberattack
    \item It is used for blocking access to a wLAN- or LAN-connected resource
\end{itemize}
\end{frame}
\begin{frame}[noframenumbering]{What is a dDOS Attack}
\begin{itemize}
    \item It is a non-destructive cyberattack
    \item It is used for blocking access to a wLAN- or LAN-connected resource
    \item It is, by definition, a distributed attack
\end{itemize}
\end{frame}
\begin{frame}[noframenumbering]{What is a dDOS Attack}
\begin{itemize}
    \item It is a non-destructive cyberattack
    \item It is used for blocking access to a wLAN- or LAN-connected resource
    \item It is, by definition, a distributed attack
    \item It is dangerously simple to implement
\end{itemize}
\end{frame}
\begin{frame}
\begin{center}
So what does a dDOS attack look like?
\end{center}
\end{frame}
\begin{frame}{So what does a dDOS attack look like?}
\begin{center}
\input{images/ddos}
\end{center}
\end{frame}
\begin{frame}[noframenumbering]{So what does a dDOS attack look like?}
\begin{center}
\input{images/ddos_2}
\end{center}
\end{frame}
\begin{frame}[noframenumbering]{So what does a dDOS attack look like?}
\begin{center}
\input{images/ddos_3}
\end{center}
\end{frame}
\begin{frame}[noframenumbering]{So what does a dDOS attack look like?}
\begin{center}
\input{images/ddos_4}
\end{center}
\end{frame}
\begin{frame}[noframenumbering]{So what does a dDOS attack look like?}
\begin{center}
\input{images/ddos_5}
\end{center}
\end{frame}
\begin{frame}[noframenumbering]{So what does a dDOS attack look like?}
\begin{center}
\input{images/ddos_6}
\end{center}
\end{frame}
\end{document}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: t
%%% End:
| {
"alphanum_fraction": 0.7246192893,
"avg_line_length": 28.1428571429,
"ext": "tex",
"hexsha": "23360c4eab8c99d3d09552b01dfd639f920af3a2",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "e9ead3057b9aa61a97677a477dd9b9791f5ca540",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "looopTools/scripts",
"max_forks_repo_path": "python/advance_ddos/presentation/on_ddos.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e9ead3057b9aa61a97677a477dd9b9791f5ca540",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "looopTools/scripts",
"max_issues_repo_path": "python/advance_ddos/presentation/on_ddos.tex",
"max_line_length": 75,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "e9ead3057b9aa61a97677a477dd9b9791f5ca540",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "looopTools/scripts",
"max_stars_repo_path": "python/advance_ddos/presentation/on_ddos.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 783,
"size": 2364
} |
\documentclass[a4paper,12pt]{scrartcl}
% font packages
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
% packages for mathematical type setting
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{bbm}
% packge for norm command
\usepackage{physics}
% references
\usepackage{cleveref}
\author{\normalsize Linus Böhm, Jurek Rostalsky}
\title{Benign Overfitting}
\date{}
% formatting
\setlength{\parindent}{0pt}
\pagestyle{empty}
% definition
\theoremstyle{definition}
\newtheorem{definition}{Definition}[section]
\begin{document}
\maketitle
\section{Basic definitions} \label{sec:basic_definitions}
Let \(x \in \mathbb{H}\) and \(y \in \mathbb{R}\) be random variables with zero mean, where \(\mathbb{H}\) is a Hilbert space.
\begin{definition} [covariance matrix]
\label{def:covarianvce_matrix}
\begin{equation}
\Sigma = \mathbb{E}\left(\left(x - \mathbb{E}(x)\right)\left(x - \mathbb{E}(x)\right)^T\right) = \mathbb{E}(xx^T)
\end{equation}
\end{definition}
\begin{definition} [linear regression]
\label{def:linear_regression}
The problem of finding a parameter vector \(\theta^\ast \in \mathbb{H}\) with
\begin{equation}
\theta^\ast = arg \min\limits_\theta \mathbb{E}\left((y - x^T \theta)^2\right)
\end{equation}
is called \textbf{linear regression}.
\end{definition}
Let \(\left((x_1, y_1), ..., (x_n, y_n)\right) \in (\mathbb{H} \times \mathbb{R})^n\) be a list of \(n\) sampled data points. Now we define the matrix \(X = \big(x_1 \, x_2 \, ... \, x_n\big)\) and the vector \(y = (y_1 \, y_2 \, ... \, y_n)^T\). If there is a \(\theta \in \mathbb{H}\) with \(y - X^T \theta = 0\), then that \(\theta\) is a minimum of the linear regression problem, since the expectation of a square is non-negative. Usually such a \(\theta\) isn't unique, so we are interested in the minimum norm \(\theta\) with that property.
\begin{definition}[minimum norm estimator]
For given samples \(X \in \mathbb{H}^n, y \in \mathbb{R}^n\). The \textbf{minimum norm estimator} \(\theta\) is the solution of the QQP:
\begin{align*}
\label{eq:QQP}
\hat{\theta} = arg \min\limits_{\theta} &\norm{\theta}^2 & \text{subject to: } \norm{X^T \theta - y}^2 = \min\limits_\beta \norm{X^T \beta - y}^2 && \text{(QQP)}\\
\end{align*}
\end{definition}
The minimum norm estimator can be obtained by solving the normal equation:
\begin{equation}
\label{eq:normal_equation}
XX^T \theta = X y,
\end{equation}
which can be done in a numerically stable way with a QR decomposition.
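As a side remark (not part of the original problem statement above): if one additionally assumes that the Gram matrix \(X^T X \in \mathbb{R}^{n \times n}\) is invertible, which is typical in the overparametrised setting, the minimum norm estimator has the closed form
\begin{equation}
\hat{\theta} = X \left(X^T X\right)^{-1} y,
\end{equation}
since this \(\hat{\theta}\) satisfies \(X^T \hat{\theta} = y\) and lies in the span of the columns of \(X\), which is the orthogonal complement of the null space of \(X^T\).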
\newpage
\begin{definition} [Excess risk]
\label{def:Excess risk}\ \\
Let $\mathbb{E}_{x,y}$ denote the expectation over a fresh sample $(x,y)$, conditionally on the training data; then define:
\begin{equation}
    R(\theta) := \mathbb{E}_{x,y}[(y - x^T\theta)^2 - (y - x^T\theta^*)^2]
\end{equation}
\end{definition}
\begin{definition} [Effective Ranks]
\label{def:Effective Ranks}\ \\
For the covariance operator $\Sigma$, define $\lambda_i = \mu_i(\Sigma)$ for $i = 1,2,...$, where $\mu_1(\Sigma) \geq \mu_2(\Sigma) \geq ...$ are the eigenvalues in descending order. If $\sum\limits_{i=1}^\infty \lambda_i < \infty$ and $\lambda_{k+1} > 0$ for $k \geq 0,$ define:
\begin{equation}
  r_k(\Sigma) = \frac{\sum_{i>k}\lambda_i}{\lambda_{k+1}} ,\hspace*{2cm}
  R_k(\Sigma) = \frac{(\sum_{i>k}\lambda_i)^2}{\sum_{i>k}\lambda_i^2}
\end{equation}
\end{definition}
\newpage
\section{Theorems} \label{sec:Theorems}
\newtheorem{thm}{Theorem}
\begin{thm}\ \\
For any $\sigma_x$ there are $b,c,c_1 > 1$, for which the following holds. Consider a linear regression problem from definition~\ref{def:linear_regression}. Define:
\begin{equation}
\begin{aligned}
    k^* = \min \{k \geq 0: r_k(\Sigma) \geq bn\},
\end{aligned}
\end{equation}
where the minimum of the empty set is defined as $\infty$. Suppose $\delta < 1$ with $\log(\frac{1}{\delta}) < n/c$. If $k^* \geq n/c_1$, then $\mathbb{E}R(\hat{\theta}) \geq \sigma^2/c.$ Otherwise,
\begin{equation}
\begin{aligned}
    R(\hat{\theta}) \leq c\left(\norm{\theta^*}^2\norm{\Sigma}\max\left\{\sqrt{\frac{r_0(\Sigma)}{n}},\frac{r_0(\Sigma)}{n},\sqrt{\frac{\log(1/\delta)}{n}}\right\}\right) + c\log\left(\frac{1}{\delta}\right)\sigma_y^2\left(\frac{k^*}{n} + \frac{n}{R_{k^*}(\Sigma)}\right)\\
\end{aligned}
\end{equation}
with probability at least $1 - \delta$, and
\begin{equation}
\begin{aligned}
    \mathbb{E}R(\hat{\theta}) \geq \frac{\sigma^2}{c} \left(\frac{k^*}{n} + \frac{n}{R_{k^*}(\Sigma)}\right)
\end{aligned}
\end{equation}
Moreover, there are universal constants $a_1, a_2, n_0$ such that $\forall n \geq n_0, \forall \Sigma, \forall t \geq 0$ there is a $\theta^*$ with $\norm{\theta^*} = t$ such that for $x \sim N(0,\Sigma)$ and $y|x \sim N(x^T\theta^*,\norm{\theta^*}^2\norm{\Sigma})$, with probability at least $1/4$,
\begin{equation}
\begin{aligned}
    R(\hat{\theta})\geq \frac{1}{a_1}\norm{\theta^*}^2\norm{\Sigma}\mathbbm{1}_{\left[\frac{r_0(\Sigma)}{n\log(1+r_0(\Sigma))} \geq a_2\right]}
\end{aligned}
\end{equation}
\end{thm}
\begin{thm}\ \\
The excess risk of the minimum norm estimator satisfies:
\begin{equation}
\begin{aligned}
    R(\hat{\theta}) & \leq 2(\theta^*)^TB\theta^* + c\sigma^2\log(\frac{1}{\delta})tr(C) \text{ and} \\
    \mathbb{E}_\epsilon R(\hat{\theta}) & \geq (\theta^*)^TB\theta^* + \sigma^2 tr(C) \text{ where}\\\\
    B & = (I - X(X^TX)^{-1}X^T)\Sigma(I - X(X^TX)^{-1}X^T), \\
    C & = (X^TX)^{-1}X^T\Sigma X(X^TX)^{-1}
\end{aligned}
\end{equation}
\end{thm}
\end{document}
| {
"alphanum_fraction": 0.6589015152,
"avg_line_length": 35.9183673469,
"ext": "tex",
"hexsha": "ab69aeba74fb86fea1da32f9d74805508dbd07e2",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f3bd4f5d9455f4402440bd5abbc813c152aa866b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Mathemalsky/BenignOverfitting",
"max_forks_repo_path": "script/BenignOverfitting.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f3bd4f5d9455f4402440bd5abbc813c152aa866b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Mathemalsky/BenignOverfitting",
"max_issues_repo_path": "script/BenignOverfitting.tex",
"max_line_length": 535,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f3bd4f5d9455f4402440bd5abbc813c152aa866b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Mathemalsky/BenignOverfitting",
"max_stars_repo_path": "script/BenignOverfitting.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1986,
"size": 5280
} |
\section{Symmetric encryption}
| {
"alphanum_fraction": 0.7878787879,
"avg_line_length": 8.25,
"ext": "tex",
"hexsha": "f52db79b09a4c0f28d7862dc62e50e9ab55d80b6",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "adamdboult/nodeHomePage",
"max_forks_repo_path": "src/pug/theory/statistics/encryptionModern/02-00-Symmetric.tex",
"max_issues_count": 6,
"max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "adamdboult/nodeHomePage",
"max_issues_repo_path": "src/pug/theory/statistics/encryptionModern/02-00-Symmetric.tex",
"max_line_length": 30,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "adamdboult/nodeHomePage",
"max_stars_repo_path": "src/pug/theory/statistics/encryptionModern/02-00-Symmetric.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 8,
"size": 33
} |
\openepigraph{Memory... is the diary that we all carry about with us.}{---Oscar Wilde}
\section{Excel Tips}
This document gives a brief introduction to using Excel for data analysis. The guide covers basic Excel operation, formula use, and pivot tables. Many first-time users will find aspects of Excel confusing; this guide is intended to give you enough basic knowledge to get you started. There are extensive help guides published on the internet, and if you find yourself confronted with a problem that is not discussed here, you will often find valuable help by searching Google for the answer. If you have suggestions for Excel tips that should be included in this document, please let your lab instructor know.
\subsection{What is Excel?}
Excel is a spreadsheet tool. It allows you to organize and analyze data that are stored in column and row format. Excel is extremely powerful. With enough know-how, almost all of the data analysis that statistics programs like SPSS are built for can be accomplished directly in Excel.
\subsection{What is Excel used for in this course?}
You can use excel (or another spreadsheet program) to:
\begin{itemize}
\item Store your data,
\item to perform basic analysis of your data (e.g., getting averages and standard deviations, computing correlations etc.), and
\item to create figures, graphs, and tables to present your data.
\end{itemize}
\section{Using Excel}
\subsection{The excel window}
Excel is a spreadsheet that contains different rows (the numbers), and different columns (the letters).
The individual cells can be used to hold data in the form of numbers or letters, to analyze data, and to report data using tables and figures.
\begin{figure}
\includegraphics[width=.7\linewidth]{LabmanualFigures/Excel1.pdf}
\caption{The excel window}
\label{fig:excelwindow}
\end{figure}
\subsection{Inputting data}
Type or paste individual numbers or letters into individual cells, and then press return.
\begin{figure}
\includegraphics[width=.5\linewidth]{LabmanualFigures/Excel2.pdf}
\caption{Inputting data}
\label{fig:excel2}
\end{figure}
\subsection{Addressing cells}
Excel uses a Letter-Number system to address or point to specific cells. As you can see, all of the numbers in this example have been added to rows 2-9 in Column B. Using the Letter-Number system, B2=1, B3=5, B4=4, B6=7, and so on.
\subsection{Setting one cell to equal another}
\begin{figure}
\includegraphics[width=.5\linewidth]{LabmanualFigures/Excel3.pdf}
\caption{Making one cell equal another}
\label{fig:excel3}
\end{figure}
If you click on an empty cell, you can make this cell have the same contents as another cell by typing the (=) sign, then clicking on the cell you want to duplicate.
E.g., click on A2, type (=), then click on B2. B2 initially had a 1 in it; now when you press enter, cell A2 will also have a 1 in it. If you then change the contents of B2 to another number (say 5), the contents of A2 will also change to 5.
Note: this trick comes in handy later on to help you quickly manipulate data in Excel.
\subsection{Adding cells together, and copying commands across other cells}
\begin{figure}
\includegraphics[width=.5\linewidth]{LabmanualFigures/Excel4.pdf}
\caption{Adding}
\label{fig:excel4}
\end{figure}
Let's say you had two columns of numbers. For each row you want to compute the sum of the first and second number (e.g., the first two numbers are 1 and 1; you want to add them together to get 2).
Click on a new column (C2) and type the equal sign =
Now click on A2 (which contains 1) once with the mouse, and then click on B2 (which also contains 1). Now the formula in C2 will automatically contain =A2+B2
When you press enter the two numbers will be summed, and the answer in C2 with be 2.
\begin{figure}
\includegraphics[width=.5\linewidth]{LabmanualFigures/Excel5.pdf}
\caption{Applying across cells}
\label{fig:excel5}
\end{figure}
If you want to do the same operation for all of the rows, all you have to do is click on C2. Notice there is a little blue square at the bottom right hand corner of the cell.
\subsection{Using a formula to add two numbers together}
You can do the same addition operation from above by using the sum formula. In this example you would click on C2, then type =sum(
Then you need to tell excel the range of columns and rows that you want to sum over. In this case you can just click on A2, and drag across to B2. This will make a temporary blue rectangle, which represents the cells that will be summed together
Complete the formula by using the )
Then press enter, and you will get the answer.
\begin{figure}
\includegraphics[width=.5\linewidth]{LabmanualFigures/Excel6.pdf}
\caption{Adding using the sum formula}
\label{fig:excel6}
\end{figure}
You can also copy this formula down across the other cells in the same way as above.
If you click on the little square, then drag down, the formula will be applied to each of the rows.
You then have all the answers without having to enter the formula separately for each row.
\begin{figure}
\includegraphics[width=.5\linewidth]{LabmanualFigures/Excel7.pdf}
\caption{Completing the formula}
\label{fig:excel7}
\end{figure}
\subsection{Getting an average}
Using the same method as the sum formula, you can also compute the mean of a set of numbers by using the average function instead of the sum function. The process is the same. Select a cell, type =average( then select the cells you want (in the example B2:B9) and press enter.
\begin{figure}
\includegraphics[width=.5\linewidth]{LabmanualFigures/Excel8.pdf}
\caption{The average formal}
\label{fig:excel8}
\end{figure}
\subsection{Other formulas}
\begin{itemize}
\item Max – finds the biggest number
\item Min- finds the smallest number
\item Stdev- computes the standard deviation
\item Countif – counts the number of times a specific value occurs
\end{itemize}
Excel has a dictionary of other functions that may be useful; you can look them up using help, or using Insert: Function from the menu.
\subsection{Selecting a range of cells}
Formulas usually require you to enter a range of cells to compute. The format is to put the upper-leftmost cell first (e.g., A2), and then the bottom-rightmost cell last (e.g., C9). So, A2:C9 would signify the
following rectangle.
\begin{figure}
\includegraphics[width=.5\linewidth]{LabmanualFigures/Excel9.pdf}
\caption{selecting a range}
\label{fig:excel9}
\end{figure}
\subsection{Copying a formula to another cell: relative coordinates}
If you were to now select cell A13 it would have the formula =A2:C9 inside. If you copied this cell, and then pasted it into the cell beside it A14, Excel would automatically move the rectangle over one. This is because without further specification, excel always treats cells in relative coordinates.
\begin{figure}
\includegraphics[width=.5\linewidth]{LabmanualFigures/Excel10.pdf}
\caption{Relative coordinates}
\label{fig:excel10}
\end{figure}
\subsection{Absolute coordinates}
You can control whether or not excel uses relative coordinates. When you set the range you can insert the \$ sign to make sure that excel holds the rectangle in place.
For example:
A13 =A2:C9
-this formula has no \$s, as in the above example, if you copy this formula to another cell say
B13, then B13=B2:D9, and not the original A2:C9
A13=\$A\$2:\$C\$9
-This formula has \$s infront of both letter and number for each cell in the rectangle. Now, when
the formula is copied to another cell, the original rectangle will be used. E.g., B13=A2:C9 You can set the only columns or the row or both to absolute coordinates using the \$ sign.
\subsection{Sorting data}
\begin{figure}
\includegraphics[width=.5\linewidth]{LabmanualFigures/Excel11.pdf}
\caption{Some numbers to sort}
\label{fig:excel11}
\end{figure}
If you had a bunch of numbers in random order, you could easily sort them by selecting the column or row of numbers, then click on Data from the menu, and choose sort:
\begin{figure}
\includegraphics[width=.5\linewidth]{LabmanualFigures/Excel12.pdf}
\caption{Sorting}
\label{fig:excel12}
\end{figure}
You will see a menu something like this. You can choose to sort the current column ascending (smallest to largest) or descending (largest to smallest). Click OK and the data will be rearranged in order.
\subsection{Making a histogram}
\begin{figure}
\includegraphics[width=.7\linewidth]{LabmanualFigures/Excel13.pdf}
\caption{Making a histogram}
\label{fig:excel13}
\end{figure}
If you want to know how many responses occurred for a particular range of values you can create a histogram. The following example is a very simple way to count individual categories of values in a data set.
Column A has the example data. In column C, I created cells with values ranging from 1 to 7 (Cells C2 : C8). In cell D2, I typed in the countif(range,value) formula. The range refers to the selected data (\$A\$2:\$A\$30).
Note, there are \$ signs used before the letter and numbers so that the selected data will be the same when the formula is copied. In this case, it is the value 1, which is in Cell C2. The value refers to the entity that is being counted. When you press enter, excel will compute the number of times that 1 appears in the data. You can now drag cell (D2) with the formula down, and it will be used to count the rest of the numbers.
\subsection{Making a table}
\begin{figure}
\includegraphics[width=.7\linewidth]{LabmanualFigures/Excel14.pdf}
\caption{Making a table}
\label{fig:excel14}
\end{figure}
Now that you have the frequency of each of the values from 1-7,
you can make a graph by selecting the column with the frequencies in it, then clicking on INSERT from the menu and choosing chart. Select a column chart, and then you will see:
You can then edit this chart. You should insert a title for the figure, an x-axis label (e.g., the numbers on the bottom represent different categories from the data), and a y-axis label (the numbers on the left side represent the frequency count).
\begin{figure}
\includegraphics[width=.7\linewidth]{LabmanualFigures/Excel15.pdf}
\caption{A figure for the histogram}
\label{fig:excel15}
\end{figure}
\subsection{Paired Samples T-test}
Suppose you ran an experiment with one independent variable that has 2 levels. You used a within-subject design, so each participant contributed data to each of the 2 levels. You want to find out if there was a significant effect. That is, is the mean performance in Level 1 different from mean performance in Level 2? An appropriate test is the paired-samples t-test.
Here is some sample data in Excel.
The test can be computed with Excel's TTEST formula, =TTEST(array1, array2, tails, type); the two arrays are the Level 1 and Level 2 columns, tails is set to 2 for a two-tailed test, and the number 1 for the type argument tells Excel to use a paired samples t-test. The formula returns the p-value directly. The resulting p-value is <.05, so we now know that there was a significant effect. The means for level 1 were significantly smaller than the means for level 2.
\begin{figure}
\includegraphics[width=.7\linewidth]{LabmanualFigures/Excel16.pdf}
\caption{t test in excel}
\label{fig:excel16}
\end{figure}
Tip: Even before you run a paired samples t-test you should have a good idea whether the test will be significant. You can get a ballpark estimate by computing the differences between means for each subject. This has been done in the example under the column labeled Difference. Here, the mean for level 1 has been subtracted from the mean for level 2 for each subject. This gives us a difference score. Look at all of the difference scores. You will see that almost all of them (except for 2) are positive. So, most of the subjects showed the effect. As a ballpark rule, when most of the subjects show the effect you should expect that a t-test will likely show a significant result.
\section{SPSS}
\subsection{Paired Samples T-test in SPSS}
\begin{figure}
\includegraphics[width=.5\linewidth]{LabmanualFigures/SPSS1.pdf}
\caption{Copy the data into SPSS}
\label{fig:spss1}
\end{figure}
1. Copy the data for each level of the independent variable into separate columns in the data editor. In the example I've given new names (lev1, lev2) to each of the conditions.
\begin{figure}
\includegraphics[width=.5\linewidth]{LabmanualFigures/SPSS2.pdf}
\caption{Choose t-test}
\label{fig:SPSS2}
\end{figure}
2. Next, choose Analyze from the menu, select Compare Means, then select Paired-Samples T Test
3. You will see the following menu. Select both of your variables (Lev1, Lev2) then press the arrow button to move them into the paired variables list
\begin{figure}
\includegraphics[width=.5\linewidth]{LabmanualFigures/SPSS3.pdf}
\caption{Select levels}
\label{fig:SPSS3}
\end{figure}
4. It should look like this... now click the button for the analysis
5. The first box shows basic descriptive statistics, means, number per cell, standard deviation and standard error of the mean.
\begin{figure}
\includegraphics[width=\linewidth]{LabmanualFigures/SPSS4.pdf}
\caption{ttest output}
\label{fig:SPSS4}
\end{figure}
6. The third box shows the t-value, the associated degrees of freedom (df), and the p-value (Sig. (2-tailed))
7. Here's how you write up your results in one sentence.
8. Means were significantly smaller for level 1 (174) than level 2 (190), t(15) = 2.51, p<.05.
\subsection{2 x 2 Repeated Measures ANOVA}
\subsection{From Data to Analysis}
Suppose you ran an experiment with 2 independent variables, each with 2 levels. If this is a within-subject design, each participant will contribute data to each of the 4 conditions in the design. The appropriate test is a 2x2 repeated measures ANOVA. We begin by looking at the data in Excel.
\begin{figure}
\includegraphics[width=.7\linewidth]{LabmanualFigures/SPSS5.pdf}
\caption{Sample data for a 2 x 2 factorial design}
\label{fig:SPSS5}
\end{figure}
1. All of the four conditions are placed into 4 separate columns. The second IV is nested underneath the first IV.
2. The means and standard deviations can be computed directly in Excel for each of the conditions.
3. The means are then organized in a table, and a figure can be created so that we can see the pattern of results.
4. Main effects: It looks like there are two main effects. One for IV1: level 1 is smaller than level 2. One for IV2: level 1 is smaller than level 2.
5. Interaction: It looks like there is an interaction. The difference between L1 and L2 of IV1 is larger for level 1 of IV2 (in red) than level 2 of IV2 (in blue).
6. We need to run a 2x2 repeated measures ANOVA to determine whether the main effects are significant, and to determine if the interaction is significant.
\subsection{2x2 Repeated measures ANOVA in SPSS}
\begin{figure}
\includegraphics[width=.6\linewidth]{LabmanualFigures/SPSS6.pdf}
\caption{Copy data into SPSS}
\label{fig:SPSS6}
\end{figure}
1. Copy the data into SPSS. Make sure each column represents a different condition in the design.
2. Give names to each of the variables. The first column represents IV1 Level1 \& IV2 Level 1. The second column represents IV1 Level 1 \& IV2 Level 2. The third column represents IV1 Level 2 \& IV2 Level 1. The fourth column represents IV1 Level 2 \& IV2 Level 2.
\begin{figure}
\includegraphics[width=.5\linewidth]{LabmanualFigures/SPSS7.pdf}
\caption{Choose the model}
\label{fig:sp7}
\end{figure}
3. Choose Analyze, General Linear Model, Repeated Measures from the menu.
\begin{figure}
\includegraphics[width=.5\linewidth]{LabmanualFigures/SPSS8.pdf}
\caption{Name variables}
\label{fig:SPSS8}
\end{figure}
\begin{figure}
\includegraphics[width=.5\linewidth]{LabmanualFigures/SPSS9.pdf}
\caption{ thing a}
\label{fig:SPSS9}
\end{figure}
4. Name IV1, specify 2 levels
5. Name IV2, specify 2 levels
6. Name dependent variable, click define
7. Select all four conditions, then press the first right arrow. If you want more options see below; otherwise press OK.
\begin{figure}
\includegraphics[width=\linewidth]{LabmanualFigures/SPSS10.pdf}
\caption{Select conditions}
\label{fig:SPSS10}
\end{figure}
8. If you pressed options, then you can ask SPSS to report descriptive statistics for each condition. Choose the factors that you want, then press the arrow button to move them into the display means field. Make sure you click descriptive statistics. Then click continue.
\begin{figure}
\includegraphics[width=\linewidth]{LabmanualFigures/SPSS11.pdf}
\caption{Option to report descriptive statistics}
\label{fig:SPSS11}
\end{figure}
\subsection{Finding numbers in the SPSS analysis output}
SPSS gives you lots of information, you need to know what you are looking for. When you report the results from a 2x2 ANOVA, you will have 2 main effects, and 1 interaction. This means you will be looking for 3 F-values, 3 MSEs (Mean squared error terms), and 3 associated p-values. You will also need to know the means for the main effects and the means for the interaction.
\begin{figure}
\includegraphics[width=\linewidth]{LabmanualFigures/SPSS12.pdf}
\caption{Descriptive Statistics}
\label{fig:SPSS12}
\end{figure}
If you chose the option for descriptive stats, then you will see a small table with Means and standard deviations for each condition in the design. You will use these means to compute averages for the main effects. You will use these means to report the pattern for the interaction.
\subsection{The ANOVA table}
All of the information that you need to report main effects and interactions is available in the "tests of within-subjects effects" table.
\begin{figure}
\includegraphics[width=\linewidth]{LabmanualFigures/SPSS13.pdf}
\caption{ANOVA table}
\label{fig:SPSS13}
\end{figure}
\subsection{Main effect for IV 1}
Each main effect is listed under source. Each main effect has a corresponding error term listed below. The source for the main effect of independent variable 1 is labeled IV one. You will see it has a corresponding df of 1, an F-value of 112.371, and a p-value <.05. This main effect is significant. You would report this in the following way.
The main effect for independent variable 1 was significant, F(1,15) = 112.371, MSE = 656.47, p<.05.
The 15 comes from the df of the error term for IVone. Likewise, the MSE (656.47) comes from the Mean Square of the error term. When you report the main effect, you will also need to report the pattern. The above sentence simply tells the reader that there was a significant difference between the levels of IVone; however, it does not explain whether level 1 was larger or smaller than level 2. To report the main effect you will need to compute the averages for each level of IVone. You can do this directly in Excel, or you can have SPSS compute these numbers for you by checking the appropriate options before running the analysis. You will need all of the numbers from the above descriptive statistics table.
First, we find the average for level 1 of IVone (477.5 + 499)/2 = 488
Second, we find the average for level 2 of IVone (659.1875 + 746.5)/2 = 703
Now we can write a complete description of the main effect for IVone.
The main effect for independent variable 1 was significant, F(1,15) = 112.371, MSE = 656.47, p<.05. Mean performance was lower for level 1 (488) than level 2 (703).
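If you would like to double-check these averages outside of Excel or SPSS, the short Python snippet below reproduces the same arithmetic from the condition means. This is only an illustration and not part of the SPSS workflow; the condition labels are made up for the example.
\begin{verbatim}
# Condition means taken from the descriptive statistics table above.
cell_means = {
    ("IV1_L1", "IV2_L1"): 477.5,
    ("IV1_L1", "IV2_L2"): 499.0,
    ("IV1_L2", "IV2_L1"): 659.1875,
    ("IV1_L2", "IV2_L2"): 746.5,
}

# Average over the levels of IV2 to get the main-effect means for IV1.
iv1_level1 = (cell_means[("IV1_L1", "IV2_L1")]
              + cell_means[("IV1_L1", "IV2_L2")]) / 2
iv1_level2 = (cell_means[("IV1_L2", "IV2_L1")]
              + cell_means[("IV1_L2", "IV2_L2")]) / 2
print(round(iv1_level1), round(iv1_level2))  # roughly 488 and 703
\end{verbatim}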
\subsection{Main effect for IV 2}
Following the same steps as above we look for IV two in the source table and find the dfs, the F-value, the MSE for the error term, and the p-value.
The main effect for independent variable 2 was significant, F(1,15) = 124.92, MSE = 379.14, p<.05. Mean performance was lower for level 1 (568) than level 2 (623).
\subsection{The interaction effect}
Reporting the interaction uses the same information that you would use to report the main effect. The interaction is listed in the source table as IVone*IVtwo. It has corresponding dfs, F-value, MSE for the error term, and a p-value.
The interaction between independent variable one and two was significant, F(1,15) = 36.89, MSE = 469.64, p<.05. The difference between level one and level two for independent variable two was smaller for level one (499 - 478 = 21) than level two (747 - 659 = 88) of independent variable 1.
\subsection{Post Hoc Tests to interpret the interaction}
Post hoc tests are used to clarify the nature of the interaction. In the above example we found a significant interaction. The difference between level 1 and level 2 for IV two was 21 and 88 for level 1 and level 2 of the first independent variable. The significant interaction tells us that 21 and 88 are significantly different from each other. However, we still do not know whether each comparison is significant in and of itself. For example, it may be the case that the difference between level 1 and level 2 for IV 2 was only significant for the 2nd level of IV1 (88), and not significant for the 1st level of IV1 (21). In this example you can run a paired t-test, or a one-way repeated measures ANOVA on the specific comparison that you are interested in. For example, you would compare the means from IV1L1/IV2L1 and IV1L1/IV2L2. This comparison would test whether the difference of 21 that was found was actually significantly different from 0. You will report the statistics for the post-hoc tests in the same manner as you would report t-tests, or F-statistics from the above examples. | {
"alphanum_fraction": 0.755033557,
"avg_line_length": 57.3984168865,
"ext": "tex",
"hexsha": "52c3b5fd99e316a09974d25ad657a1721d210650",
"lang": "TeX",
"max_forks_count": 11,
"max_forks_repo_forks_event_max_datetime": "2022-03-04T23:17:38.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-09-01T14:17:08.000Z",
"max_forks_repo_head_hexsha": "74745c4bd69d185f1a36ef38638be8cc55966b06",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "danBurrell/research_methods_with_R",
"max_forks_repo_path": "LabManual/stats.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "74745c4bd69d185f1a36ef38638be8cc55966b06",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "danBurrell/research_methods_with_R",
"max_issues_repo_path": "LabManual/stats.tex",
"max_line_length": 1083,
"max_stars_count": 12,
"max_stars_repo_head_hexsha": "74745c4bd69d185f1a36ef38638be8cc55966b06",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "danBurrell/research_methods_with_R",
"max_stars_repo_path": "LabManual/stats.tex",
"max_stars_repo_stars_event_max_datetime": "2021-12-24T12:46:59.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-12-29T16:39:51.000Z",
"num_tokens": 5496,
"size": 21754
} |
\documentclass[12pt]{book}
\usepackage{mathtools}
\usepackage{listings}
\usepackage{amssymb}
\title{Software Design - Catch A Cab }
\author{Gustav, Robin, Pascal, Samuel, David}
\date{\today}
\begin{document}
\maketitle
\chapter{Overview / introduction}
How to hail a cab without a smartphone.
//TODO short excerpt on purpose, idea and scope
\section{Overall description}
//TODO
% ----------------- Requirements ---------------------------
\chapter{Specific requirements specification}
\section{Functional Requirements}
\subsection{Functional requirement}
\textbf{Title:} Person recognition \\
\textbf{Description:} \\
\textbf{Dependency:}
\subsection{Functional requirement}
\textbf{Title:} Determine probability that a person is hailing a cab\\
\textbf{Description:} \\
\textbf{Dependency:}
\subsection{Functional requirement}
\textbf{Title:} Hand detection \\
\textbf{Description:} \\
\textbf{Dependency:}
\subsection{Functional requirement}
\textbf{Title:} Handsign detection\\
\textbf{Description:} \\
\textbf{Dependency:}
\subsection{Functional requirement}
\textbf{Title:} Count number of fingers\\
\textbf{Description:} \\
\textbf{Dependency:}
\subsection{Functional requirement}
\textbf{Title:} Blacklist objects detection \\
\textbf{Description:} \\
\textbf{Dependency:}
\subsection{Functional requirement}
\textbf{Title:} Assign detected object to person \\
\textbf{Description:} \\
\textbf{Dependency:}
\subsection{Functional requirement}
\textbf{Title:} Call other cab if required \\
\textbf{Description:} \\
\textbf{Dependency:}
\subsection{Functional requirement}
\textbf{Title:} Detect distance between person and street \\
\textbf{Description:} \\
\textbf{Dependency:}
\subsection{Functional requirement}
\textbf{Title:} Recognize elderly people, remember their faces and schedules \\
\textbf{Description:} Recognize patterns in a person's ride schedule and show up at recurring times and places.\\
\textbf{Dependency:}
\section{Non-functional Requirements}
//TODO
\section{activity and use case diagram}
// TODO what Robin wrote on the board
\section{External interface requirements}
% ----------------- Archtitecture ---------------------------
\chapter{Software architecture}
\section{Service oriented design}
\subsection{List of services}
\subsection{Interfaces}
Which service receives which data and returns which data in which format?
\section{Technology stack}
\begin{itemize}
\item[1.] Python 3
\item[2.] OpenCV
\item[3.] OpenVINO
\item[4.] ROS
\end{itemize}
\end{document}
| {
"alphanum_fraction": 0.7403206883,
"avg_line_length": 25.8282828283,
"ext": "tex",
"hexsha": "7caa1d10aa7c828b6ce3b1a2947120f3f6114f5e",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2018-11-17T00:42:42.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-11-17T00:42:42.000Z",
"max_forks_repo_head_hexsha": "c19ba3316becd314190a3fb74043c2821eefa496",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "CatchACab/catchacab",
"max_forks_repo_path": "documentation/documentation.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "c19ba3316becd314190a3fb74043c2821eefa496",
"max_issues_repo_issues_event_max_datetime": "2018-11-17T01:56:54.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-11-17T01:17:11.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "CatchACab/catchacab",
"max_issues_repo_path": "documentation/documentation.tex",
"max_line_length": 113,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "c19ba3316becd314190a3fb74043c2821eefa496",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "CatchACab/catchacab",
"max_stars_repo_path": "documentation/documentation.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 657,
"size": 2557
} |
\chapter*{INTRODUCTION}
\addcontentsline{toc}{chapter}{INTRODUCTION} | {
"alphanum_fraction": 0.8235294118,
"avg_line_length": 34,
"ext": "tex",
"hexsha": "7f3d631afb2c47b110d7f259e287c3aac70c0f25",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "339b1cea55f0cc08106782d43460870a6e02fd1b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "tonykevin/report",
"max_forks_repo_path": "chapters/introduction.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "339b1cea55f0cc08106782d43460870a6e02fd1b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "tonykevin/report",
"max_issues_repo_path": "chapters/introduction.tex",
"max_line_length": 44,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "339b1cea55f0cc08106782d43460870a6e02fd1b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "tonykevin/report",
"max_stars_repo_path": "chapters/introduction.tex",
"max_stars_repo_stars_event_max_datetime": "2017-10-29T15:34:53.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-10-29T15:34:53.000Z",
"num_tokens": 25,
"size": 68
} |
\section{\module{MacOS} ---
Access to Mac OS interpreter features}
\declaremodule{builtin}{MacOS}
\platform{Mac}
\modulesynopsis{Access to Mac OS-specific interpreter features.}
This module provides access to MacOS specific functionality in the
Python interpreter, such as how the interpreter eventloop functions
and the like. Use with care.
Note the capitalization of the module name; this is a historical
artifact.
\begin{datadesc}{runtimemodel}
Either \code{'carbon'} or \code{'macho'}. This
signifies whether this Python uses the Mac OS X and Mac OS 9 compatible
CarbonLib style or the Mac OS
X-only Mach-O style. In earlier versions of Python the value could
also be \code{'ppc'} for the classic Mac OS 8 runtime model.
\end{datadesc}
\begin{datadesc}{linkmodel}
The way the interpreter has been linked. As extension modules may be
incompatible between linking models, packages could use this information to give
more informative error messages. The value is one of \code{'static'} for a
statically linked Python, \code{'framework'} for Python in a Mac OS X framework,
\code{'shared'} for Python in a standard unix shared library and
\code{'cfm'} for the Mac OS 9-compatible Python.
\end{datadesc}
\begin{excdesc}{Error}
This exception is raised on MacOS generated errors, either from
functions in this module or from other mac-specific modules like the
toolbox interfaces. The arguments are the integer error code (the
\cdata{OSErr} value) and a textual description of the error code.
Symbolic names for all known error codes are defined in the standard
module \refmodule{macerrors}.\refstmodindex{macerrors}
\end{excdesc}
\begin{funcdesc}{SetEventHandler}{handler}
In the inner interpreter loop Python will occasionally check for events,
unless disabled with \function{SchedParams()}. With this function you
can pass a Python event-handler function that will be called if an event
is available. The event is passed as parameter and the function should return
non-zero if the event has been fully processed, otherwise event processing
continues (by passing the event to the console window package, for instance).
Call \function{SetEventHandler()} without a parameter to clear the
event handler. Setting an event handler while one is already set is an
error.
Availability: MacPython-OS9.
\end{funcdesc}
\begin{funcdesc}{SchedParams}{\optional{doint\optional{, evtmask\optional{,
besocial\optional{, interval\optional{,
bgyield}}}}}}
Influence the interpreter inner loop event handling. \var{Interval}
specifies how often (in seconds, floating point) the interpreter
should enter the event processing code. When true, \var{doint} causes
interrupt (command-dot) checking to be done. \var{evtmask} tells the
interpreter to do event processing for events in the mask (redraws,
mouseclicks to switch to other applications, etc). The \var{besocial}
flag gives other processes a chance to run. They are granted minimal
runtime when Python is in the foreground and \var{bgyield} seconds per
\var{interval} when Python runs in the background.
All parameters are optional, and default to the current value. The return
value of this function is a tuple with the old values of these options.
Initial defaults are that all processing is enabled, checking is done every
quarter second and the processor is given up for a quarter second when in the
background.
The most common use case is to call \code{SchedParams(0, 0)} to completely disable
event handling in the interpreter mainloop.
Availability: MacPython-OS9.
\end{funcdesc}
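For illustration only, the following sketch (which assumes an interactive
MacPython-OS9 session where this module is importable, and that the returned
tuple can be passed back in the same order) disables event handling around a
time-critical section and then restores the previous settings:
\begin{verbatim}
import MacOS

# Completely disable interrupt checking and event processing in the
# interpreter main loop; the previous settings are returned as a tuple.
old = MacOS.SchedParams(0, 0)
try:
    pass                      # time-critical work goes here
finally:
    MacOS.SchedParams(*old)   # restore the previous scheduling parameters
\end{verbatim}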
\begin{funcdesc}{HandleEvent}{ev}
Pass the event record \var{ev} back to the Python event loop, or
possibly to the handler for the \code{sys.stdout} window (based on the
compiler used to build Python). This allows Python programs that do
their own event handling to still have some command-period and
window-switching capability.
If you attempt to call this function from an event handler set through
\function{SetEventHandler()} you will get an exception.
Availability: MacPython-OS9.
\end{funcdesc}
\begin{funcdesc}{GetErrorString}{errno}
Return the textual description of MacOS error code \var{errno}.
\end{funcdesc}
\begin{funcdesc}{splash}{resid}
This function will put a splash window
on-screen, with the contents of the DLOG resource specified by
\var{resid}. Calling with a zero argument will remove the splash
screen. This function is useful if you want an applet to post a splash screen
early in initialization without first having to load numerous
extension modules.
Availability: MacPython-OS9.
\end{funcdesc}
\begin{funcdesc}{DebugStr}{message \optional{, object}}
On Mac OS 9, drop to the low-level debugger with message \var{message}. The
optional \var{object} argument is not used, but can easily be
inspected from the debugger. On Mac OS X the string is simply printed
to stderr.
Note that you should use this function with extreme care: if no
low-level debugger like MacsBug is installed this call will crash your
system. It is intended mainly for developers of Python extension
modules.
\end{funcdesc}
\begin{funcdesc}{SysBeep}{}
Ring the bell.
\end{funcdesc}
\begin{funcdesc}{GetTicks}{}
Get the number of clock ticks (1/60th of a second) since system boot.
\end{funcdesc}
\begin{funcdesc}{GetCreatorAndType}{file}
Return the file creator and file type as two four-character strings.
The \var{file} parameter can be a pathname or an \code{FSSpec} or
\code{FSRef} object.
\end{funcdesc}
\begin{funcdesc}{SetCreatorAndType}{file, creator, type}
Set the file creator and file type.
The \var{file} parameter can be a pathname or an \code{FSSpec} or
\code{FSRef} object. \var{creator} and \var{type} must be four character
strings.
\end{funcdesc}
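As an illustrative sketch only (the file name and the \code{'Pyth'} creator
code are example values), the two calls can be combined with handling of the
\code{Error} exception described above:
\begin{verbatim}
import MacOS

try:
    creator, filetype = MacOS.GetCreatorAndType("script.py")
    print creator, filetype
    # Tag the file as a plain 'TEXT' document with an example creator code.
    MacOS.SetCreatorAndType("script.py", "Pyth", "TEXT")
except MacOS.Error, err:
    # err.args holds the integer OSErr value and a textual description.
    print MacOS.GetErrorString(err.args[0])
\end{verbatim}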
\begin{funcdesc}{openrf}{name \optional{, mode}}
Open the resource fork of a file. Arguments are the same as for the
built-in function \function{open()}. The object returned has file-like
semantics, but it is not a Python file object, so there may be subtle
differences.
\end{funcdesc}
\begin{funcdesc}{WMAvailable}{}
Checks whether the current process has access to the window manager.
The method will return \code{False} if the window manager is not available,
for instance when running on Mac OS X Server or when logged in via ssh,
or when the current interpreter is not running from a full-blown application
bundle. A script runs from an application bundle either when it has been
started with \program{pythonw} instead of \program{python} or when running
as an applet.
On Mac OS 9 the method always returns \code{True}.
\end{funcdesc}
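For example (a minimal sketch), a script that needs a GUI can stop early when
no window manager connection is available:
\begin{verbatim}
import MacOS

if not MacOS.WMAvailable():
    raise SystemExit("no window manager access; run this script with pythonw")
\end{verbatim}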
| {
"alphanum_fraction": 0.7787503773,
"avg_line_length": 41.1552795031,
"ext": "tex",
"hexsha": "7b1c79fdb69e2dbe503930b501c4a5f0579fc1e8",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0b4a6871ca57123c10aa48cc2a5d2b7c0ee3c849",
"max_forks_repo_licenses": [
"PSF-2.0"
],
"max_forks_repo_name": "deadsnakes/python2.3",
"max_forks_repo_path": "Doc/mac/libmacos.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "0b4a6871ca57123c10aa48cc2a5d2b7c0ee3c849",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"PSF-2.0"
],
"max_issues_repo_name": "deadsnakes/python2.3",
"max_issues_repo_path": "Doc/mac/libmacos.tex",
"max_line_length": 82,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "0b4a6871ca57123c10aa48cc2a5d2b7c0ee3c849",
"max_stars_repo_licenses": [
"PSF-2.0"
],
"max_stars_repo_name": "deadsnakes/python2.3",
"max_stars_repo_path": "Doc/mac/libmacos.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1605,
"size": 6626
} |
%!TEX root=../Selex.tex
\section*{Discussion} % (fold)
\label{sec:discussion}
% Summary of the study
The over-arching objective of this simulation study was to compare the performance of assuming more structural complexity in selectivity when in fact the real data come from a simple stationary process, and assuming simple structural complexity when real data come from a fishery with dynamic changes in selectivity. To address this objective, a simulation model conditioned on length-based selectivity and variable length-at-age by year was used to generate simulated data for four alternative assessment models that assumed: (a) selectivity was length-based and stationary, (b) selectivity was length-based and changed discretely in four time periods, and (c) selectivity was age-based and allowed to change each year, and (d) selectivity was age-based and interpolated over age and year using a bicubic spline and 60 equally spaced knots. From the perspective of a na\"ive analyst who is unfamiliar with the history of the fishery and the source of the catch-age data, adopting a penalized time-varying selectivity may be more appropriate than assuming constant selectivity. This general result is also consistent with a similar simulation study that examined time-varying changes in catchability \citep{wilberg2006performance}.
The addition of age-composition information into stock assessments greatly enhances the estimability of the underlying production function and related reference points \citep{magnusson2007mfd}. In addition, age-composition information can also contribute to the estimation of overall population scale via catch-curves and fixed assumptions about natural mortality and fisheries selectivity. However, as assumptions about natural mortality and selectivity are relaxed (and freely estimated), information about population scaling degrades \citep{hilborn1992quantitative}. If the relative abundance index available for fitting lacks contrasting information to resolve confounding between overall productivity and population scale, the move towards more flexible selectivity models will lead to greater uncertainty. In such cases where it is known that selectivity has changed over time, the addition of prior information on population scaling (i.e., priors for $B_o$ or survey $q$) will be particularly valuable.
Earlier versions of the multivariate logistic likelihood for the age-composition data added a small fixed constant (1.e-30) to the observed and predicted age-proportions to ensure that the function remained defined when observed proportions-at-age were equal to 0 (i.e., this was done to avoid taking the natural logarithm of 0). It turns out that the value of the fixed constant would have slight influences on the results and in some cases prevent the non-linear search routine from converging to a solution. Adding a small constant to 0-observations that are likely to have high measurement error is akin to imputing data and is probably not a safe practice in the long-run. As an alternative approach, we adopted a method used by \cite{richards1997visualizing} where observed 0 proportions-at-age (or some minimum proportion, e.g., 2\% in their paper) were pooled with the adjacent year class for that year only. For example, if the observed proportion of age-4 fish in 1985 was equal to 0, the estimation model would compute the likelihood for the number of age 4--5 fish in 1986; there is no likelihood component for age-4 fish in 1985. This pooling of year classes eliminates the need for adding small, potentially influential, constants to the likelihood. There is also a small caveat on this pooling approach: if a given cohort never appears in catch-age data (i.e., a complete year-class recruitment failure), the estimation model will equally split the year class into the adjacent cohort.
Changes in selectivity over time are also a special case of time-varying catchability. It has already been demonstrated that additional sources of information, such as tagging data \citep{martell2002implementing}, and/or area swept information \citep{winters1985interaction}, would reduce the confounding between stock size and stock productivity. Statistical catch-at-age models rely on a separability assumption where year and age effects in the observed catch-at-age data can be partitioned into fishing mortality and selectivity, respectively. Having auxiliary information on either one of these effects from area-swept estimates of relative fishing mortality or size-based selectivity based on tag return data would reduce potential confounding and improve the estimability of time-varying parameters \citep[e.g.,][]{sinclair2002disentangling}.
This simulation study examined the specific case where the true underlying selectivity is length-based and the corresponding age-based selectivity changes over time due to changes in growth rates. Selectivity is a product of vulnerability and availability. Vulnerability is the probability of catching a fish at a given time/location assuming the fish is available to harvest. Availability is the probability of fish being present in the time/location where fishing activity is occurring. These two processes are completely confounded and cannot be separated without additional information that directly measures either vulnerability or availability. As a result of these two processes, the definition of fisheries selectivity may have many subtle differences among fisheries, or even among years in a given fishery. Moreover, changes in harvest policy, or changes in allocation among regulatory areas, or fishing fleets, can result in dramatic changes in age-based selectivity due to time/area interactions between various fishing fleets and stock distribution. Given such complexities, it might be preferable to always adopt an age-based time-varying selectivity sub model in statistical catch-age assessments. Initial assessments could start with very high penalty weights to constrain how much selectivity is allowed to vary (e.g., $\lambda^{(3)}$), then begin to relax the penalty and examine the sensitivity of both model fit and policy performance to the assumed penalty weight. Simply assuming a fixed selectivity model, or even block selectivities, is akin to extremely large penalty weights on time-varying selectivity. In this regard, this simulation presented a 'worst-case' scenario for time-varying selectivity. Future work could simulate the application of time-varying selectivity in conjunction with a set of rules governing adjustment of the $\lambda^{(3)}$ and directly explore its performance (i.e., Table \ref{tab:rankorder}).
One potential concern with the addition of more and more selectivity coefficients is that the assessment model begins to over-fit the age-composition data and explain the observed data with additional recruitment variation and more complex selectivity coefficients. This result was observed in the Monte Carlo simulations in this study where the underlying data were generated with extremely complex selectivity patterns and the estimation model had a very flexible selectivity model with many estimated parameters. Such an over-parameterized model would be of less utility in forecasting due to large uncertainties and confounding in selectivity and recruitment deviations in the terminal year. However, the true uncertainty would be better represented, which might lead to more realistic expectations in a decision table framework. Penalized likelihoods can ameliorate this to some extent, but we've also shown that the use of interpolation methods (e.g., bicubic splines) for computing age-specific selectivity coefficients each year can perform well. The subjective issue of importance in the case of using a bicubic spline is the number of spline knots that should be estimated for the year effect. Presumably model selection could proceed in similar fashion as a stepwise-selection that was proposed by \cite{thorson2012stepwise}. Rules for this procedure could also be tested in similar simulation studies such as this one.
The vast majority of assessment models are age-based, requiring age-specific estimates of fishing mortality rates, and hence age-based selectivity \citep{gavaris2002sif}. Adopting a fixed age-based selectivity model would certainly lead to erroneous results if the true underlying model is length-based and substantial changes in growth rates have occurred over time. Two options for dealing with this problem are: (1) model length-based selectivity and use empirical length-age data, or (2) model age-based selectivity but allow selectivity to change over time. The first option requires unbiased estimates of length-at-age. These cannot be obtained if composition data are sampled using length-selective gear and there is any appreciable variance in length for a given age. The second option is being explored by the International Pacific Halibut Commission for dealing with changes in selectivity associated with changes in size-at-age and stock distribution \citep{stewart2012assessment}. Selectivity in the directed Pacific halibut fishery is length-based given evidence from hooking success studies. There are minimum size-limits in place, so age-specific retention rates vary with changes in size-at-age over time. The transition to time-varying age-based selectivity was adopted primarily because it solved a retrospective bias that has been of major concern for this stock in recent years.
% MOve to discussion
% It is fairly typical to see such lags in estimates of abundance, even in age-structured models \citep{walters2004simple,cox2008practical}
A better alternative to specific case studies would be to develop a closed-loop feedback control system and an appropriate loss function to better elucidate which selectivity parameterization is more appropriate for achieving intended management objectives. This is also known in the fisheries realm as management strategy evaluation \citep{de1986simulation,Cooke1999,smith1999implementing}. Having an appropriate loss function to judge the performance of each alternative model would greatly improve model selection criterion from a policy performance perspective.
% section discussion (end)
\section*{Acknowledgments} % (fold)
\label{sec:acknowledgments}
The authors would like to thank the organizers of the CAPAM workshop held on March 12-14, 2013 in La Jolla, California. The first author would also like to thank James Ianelli and Dave Fournier for the assistance in developing the bicubic spline model for this application. Thanks to Bruce Leaman, Andre Punt, Allen Hicks, Robyn Forrest, Ray Hilborn, James Thorson, and many others for feedback on earlier presentations of this work and the provision of the Pacific hake data.
% section acknowledgments (end) | {
"alphanum_fraction": 0.8210468116,
"avg_line_length": 361.7333333333,
"ext": "tex",
"hexsha": "6b808b70697108aa87996511869914c3740298c7",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b6e1f1b5c3f81e1860a983cbafd18221d365fdb6",
"max_forks_repo_licenses": [
"Unlicense"
],
"max_forks_repo_name": "krHolt/iSCAM",
"max_forks_repo_path": "fba/CAPAM/WRITEUP/Discussion/Discussion.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b6e1f1b5c3f81e1860a983cbafd18221d365fdb6",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Unlicense"
],
"max_issues_repo_name": "krHolt/iSCAM",
"max_issues_repo_path": "fba/CAPAM/WRITEUP/Discussion/Discussion.tex",
"max_line_length": 1958,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "b6e1f1b5c3f81e1860a983cbafd18221d365fdb6",
"max_stars_repo_licenses": [
"Unlicense"
],
"max_stars_repo_name": "krHolt/iSCAM",
"max_stars_repo_path": "fba/CAPAM/WRITEUP/Discussion/Discussion.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2164,
"size": 10852
} |
%
% POK header
%
% The following file is a part of the POK project. Any modification should
% be made according to the POK licence. You CANNOT use this file or a part
% of a file for your own project.
%
% For more information on the POK licence, please see our LICENCE FILE
%
% Please follow the coding guidelines described in doc/CODING_GUIDELINES
%
% Copyright (c) 2007-2020 POK team
\chapter{Architecture}
\section{Directories hierarchy}
The project is organized with a hierarchy of several directories:
\begin{itemize}
\item
\textbf{examples}: sample code that uses pok and libpok. Code of
examples is mostly generated from AADL models by Ocarina.
\item
\textbf{kernel}: code of the kernel that provides time and space
partitioning services.
\item
\textbf{libpok}: code of libpok, the runtime of each partition.
It contains libc, POSIX and ARINC653-compliant abstraction layers.
\item
\textbf{misc}: misc files, such as makefiles, various tools to
compile POK and so on.
\end{itemize}
\section{"Schyzophrenic" architecture}
POK can be used as an executive (i.e a kernel that contains different tasks
but does not provide partitioning functionnalities) or a partitioned
architecture (a kernel isolates tasks in so-called partitions in terms
of space and time).
Moreover, it was designed to support several API and services. But you can
finely tune the kernel to avoid unused services, reduce memory footprint and
ease certification/verification efforts.
Next sections discusses the different architectures that can be used.
\subsection{Partitioned architecture}
The partitioned architecture pattern can be used with POK. In that case,
the kernel will execute several partitions on top of the POK kernel and
provide time and space partitioning across partitions.
\onehugefig{imgs/pok-layers}{The different pok layers}{figure:pok-layers}
Each partition contains its own memory allocator, runtime and
resources (the so-called \textit{libpok} part). Partitions can have
different scheduling algorithms to schedule their tasks.
In that case, the kernel provides communication isolation across
partitions as well as space isolation (each partition has its own memory
segment).
The overall architecture is illustrated in figure \ref{figure:pok-layers}.
The kernel executes the partitions; each partition contains its
application code. Drivers are executed in partitions and don't reside
inside the kernel.
To build a such architecture, you must have:
\begin{itemize}
\item
For each partition
\begin{itemize}
\item
The application code
\item
The configuration code
\end{itemize}
\item
For the kernel
\begin{itemize}
\item
The configuration code
\end{itemize}
\end{itemize}
\onehugefig{imgs/partitions-build-steps}{Build steps for a partitioned
system}{figure:partitioned-architecture-build}
Then, each part of the system is compiled and integrated, as depicted in
figure \ref{figure:partitioned-architecture-build}. The kernel is
compiled and each partition is compiled. Each part produces a binary
file. Since POK relies on the ELF file format, each binary of each part is
compiled into an ELF file.
Then, we integrate \textbf{ALL} ELF files to produce a single bootable binary so
that the final binary contains different binaries:
the code for the kernel and the code of all partitions. Since POK relies
on the ELF file format, the final ELF file contains other ELF files. The
organization of the final binary is depicted in figure
\ref{figure:pok-elf-file}.
When kernel boots, it loads each elf file of each partition in a different
memory segment to achieve space isolation. So, each ELF file of each
partition is loaded in a single and protected memory area of the system.
\onehugefig{imgs/pok-elf-file}{ELF file format of a POK system}{figure:pok-elf-file}
\subsection{Executive architecture}
At this time, the executive architecture pattern is not finished.
\section{Kernel services}
\subsection{Partitioning service}
The partitioning service of POK isolates code in time and space. Each
partition has one or more time slots to execute its code and is
isolated in a memory segment.
Using this design guideline, one partition cannot access the memory of other
partitions (and \textit{vice-versa}). During partition initialization, POK
automatically creates a memory segment for each partition and copies its code
into this protected space.
However, partitions can communicate with other partitions using so-called
ports. Inter-partition ports are also supervised by the kernel in order to
avoid unauthorized communication channels. See section
\ref{kernel-service-communication} for more information.
Partitions have time slots to execute their threads. During this execution
time, they schedule their threads according to their own scheduling protocol
so that partitions can schedule their threads in an independent way. This
scheduling strategy is often described as hierarchical scheduling.
\subsection{Thread service}
The thread service executes tasks. The system is built to execute a
predefined number of tasks. When using partitioning services, each partition
has a predefined number of tasks.
The scheduler can be preemptive so that tasks can interrupt each other. The
thread service can start, stop or pause a task (sleep).
\subsection{Time service}
The time service provides an efficient way to manage the time on your
machine. It is used by the scheduler to schedule partitions and tasks
according to their timing requirements (period, execution time and so on).
\subsection{Communication service}
\label{kernel-service-communication}
The kernel provides communication services. It allows partitions and threads
to communicate. The communication service is achieved using \textit{ports}.
\textit{Out} ports (ports that send data) can have several destinations
while \textit{in} ports (ports that receive data) can have only one source.
Data are sent and received on these ports. The kernel configuration specifies
the owner of a port, its destination and its size.
If you use partitioning service, each port is dedicated to a partition.
Consequently, when creating the port, the kernel checks that the requested port
belongs to the partition.
\subsubsection{Communication using network}
When using the network, the port must be bound to a network interface so
that data from/to the port will be sent over the network. The binding
between a port and a network interface is specified in the kernel
configuration.
Please note that in POK, when you are using partitioning services, device
drivers are executed in partitions.
\subsection{Scheduling service}
The scheduling service schedules tasks and partitions according to their
timing requirements. It relies on the time service.
Partitions are scheduled using a cyclic scheduling algorithm.
Partition threads are scheduled using Round-Robin, RMS or other available
scheduling algorithms.
\section{libpok services}
\subsection{Thread management}
Thread management consists of interfacing functions with the kernel. It
provides functions to start/suspend/stop a thread. It also provides locking
services for mutexes/semaphores and so on.
\subsection{Communication service}
Libpok provides two kinds of communication services:
\begin{itemize}
\item
\textbf{Inter-partition communication} which consists of
kernel-interfacing functions to use kernel communication ports.
\item
\textbf{Intra-partition communication service} which provides
communication facilities to communicate inside a partition.
\end{itemize}
In the following, we detail intra-partition communication services.
Intra-partition communication service provides four communication patterns:
\begin{enumerate}
\item
\textbf{Buffer} : threads send data. New data are queued according to a
specific queueing policy. Items are dequeued when a task reads the
buffer. We can store several instances of the same data.
You need to define the \texttt{POK\_NEEDS\_BUFFERS} macro to activate
this service.
\item
\textbf{Blackboard} : a shared memory space to store a data item. New
instances of the data replace the older value. We can store only one
instance of the same data.
You need to define the \texttt{POK\_NEEDS\_BLACKBOARDS} macro to activate
this service.
\item
\textbf{Events} : are used to synchronize tasks. They correspond to
POSIX mutexes and conditions.
You need to define the \texttt{POK\_NEEDS\_EVENTS} macro to activate
this service.
\item
\textbf{Semaphores} : counting semaphores, as in the POSIX standard.
You need to define the \texttt{POK\_NEEDS\_SEMAPHORES} macro to activate
this service.
\end{enumerate}
\subsection{Memory allocator}
POK also provides a memory allocator. This memory allocator was designed to
be deterministic and highly configurable. You define the amount of memory for
the memory allocator and the number of memory slices that can be allocated.
Consequently, the memory allocator can be configured with different macros.
The service is activated by defining the
\texttt{POK\_CONFIG\_NEEDS\_ALLOCATOR} macro. Then, the
\texttt{POK\_CONFIG\_ALLOCATOR\_MEMORY\_SIZE} is used to specify the amount
of memory dedicated for the memory allocator. Finally the
\texttt{POK\_CONFIG\_ALLOCATOR\_NB\_SPACES} specifies the number of spaces
you can allocate with the memory allocator.
This memory allocator can be used with the legacy layer (with the
\texttt{pok\_allocator\_allocate()} or \texttt{pok\_allocator\_free()}
functions) or with the C-library layer (\texttt{malloc()}, \texttt{free()},
\texttt{calloc()}).
\subsection{Mathematical library service}
We also add mathematical functions to ease the portability of third-party code.
These functions were imported from the NetBSD\footnote{http://www.netbsd.org}
project. It provides all necessary functions to perform math operations
(\texttt{sqrt()}, ...).
To enable the libmath functions, you must define the macro
\texttt{POK\_NEEDS\_LIBMATH}.
To have the complete list, please refer to the libpok reference manual
available on each POK release. A list of these functions is also available in
this document, in chapter \ref{chapter-api}.
\subsection{Protocols}
The libpok layer contains predefined protocols to marshall/unmarshall
application data before sending them on the network. This protocol library
could be used for several purposes: encrypting data before sending it on an
insecure network, adapting application data to constrained protocols such as
CORBA, \ldots
These protocols can be automatically used through \aadl models and
appropriate properties associated with \aadl data ports on \aadl process
components. To have more information about \aadl and protocol binding, see
section \ref{chapter-automatic-configuration}.
At this time, the libpok layer focuses on crypto and provides the following protocols:
\begin{itemize}
\item
Ceasar
\item
DES
\item
SSL
\end{itemize}
For each protocol, we have:
\begin{itemize}
\item
A function to marshall data.
\item
A function to unmarshall data.
\item
An associated type if the protocol needs a special data type to store
marshalled values.
\end{itemize}
Marshalling functions and types are described in their header files (see
\texttt{des.h}, \texttt{ssl.h}, \texttt{ceasar.h} and so on). If there is no
associated marshalling type, the marshall/unmarshall functions use the
same type as the application data, and no particular type is required.
Details of each protocol can be found in the API section (chapter
\ref{chapter-api}).
| {
"alphanum_fraction": 0.723245786,
"avg_line_length": 41.9572368421,
"ext": "tex",
"hexsha": "2202d33e1000ba20d20870c2d4461ae1ad74c620",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "6c904d4e8ce111205ae869888400a13f13c1b790",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "rguilloteau/pok",
"max_forks_repo_path": "doc/userguide/architecture.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6c904d4e8ce111205ae869888400a13f13c1b790",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "rguilloteau/pok",
"max_issues_repo_path": "doc/userguide/architecture.tex",
"max_line_length": 92,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "bbf79ba8fa1572f14ec99ef0b549da06304a4223",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "samueltardieu/pok",
"max_stars_repo_path": "doc/userguide/architecture.tex",
"max_stars_repo_stars_event_max_datetime": "2020-11-20T09:56:19.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-11-20T09:56:19.000Z",
"num_tokens": 2846,
"size": 12755
} |
\subsection{Complex conjugate}
We have \(z=a+bi\).
The complex conjugate is:
\(\bar z=a-bi\)
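For example, if \(z=3+4i\) then \(\bar z=3-4i\), and \(z\bar z=3^2+4^2=25\).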
| {
"alphanum_fraction": 0.6530612245,
"avg_line_length": 9.8,
"ext": "tex",
"hexsha": "995418b7a9082e6fc4845ea7a95eb6f2d38bd9e7",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "adamdboult/nodeHomePage",
"max_forks_repo_path": "src/pug/theory/algebra/complex/02-02-conjugate.tex",
"max_issues_count": 6,
"max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "adamdboult/nodeHomePage",
"max_issues_repo_path": "src/pug/theory/algebra/complex/02-02-conjugate.tex",
"max_line_length": 30,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "adamdboult/nodeHomePage",
"max_stars_repo_path": "src/pug/theory/algebra/complex/02-02-conjugate.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 32,
"size": 98
} |
\documentclass{article}
\usepackage{mystyle}
\usepackage{fastl2007-commands}
\begin{document}
\mytitle{Fastl's Data Fitting}
\section{Introduction}
\label{sec:introduction}
This document presents the results of the data fitting process to
Fastl's~\cite{Fastl2007Psychoacoustics} data. First, the weights used in the
process are described. Then a set of $R^2$ values is generated for several
combinations of these weights to find the best fit given the chosen weights set.
After that, a linear polynomial fit is plotted with the best values of the
weights. Finally, the results of the model are compared to the experimental
data and to Fastl's data.
\section{Model Weights}
\label{sec:model_weights}
\subsection{Modulation Frequency}
\label{sub:modulation_frequency}
To model the dependency of fluctuation strength on modulation frequency, the
vector Hweight is adjusted using the parameter $w_h$. Two Hweight vectors were
obtained, $H0_{AM}$ and $H0_{FM}$, which are intended to fit AM and FM tones
respectively. By using $w_h$, a proportion between the two vectors is obtained.
When $w_h=0$ only $H0_{FM}$ is used. Values of $w_h$ from 0 to 100 in
increments of 10 were used.
\subsection{Center Frequency}
\label{sub:center_frequency}
To model the dependency of fluctuation strength on center frequency, the vector
gzi is adjusted using the parameter $w_g$. Similarly to the Hweight vector, two
vectors were obtained for AM and FM tones. The same range of values of $w_h$ was
also used for $w_g$.
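As a rough illustration of how such a proportion between two vectors can be
formed (a Python sketch only; the variable names mirror the vectors described
above and are not taken from the actual model code):
\begin{verbatim}
import numpy as np

def blend(v_am, v_fm, w):
    # Linear mix of the AM- and FM-derived vectors, with w in percent:
    # w = 0 returns the FM vector only, w = 100 the AM vector only.
    w = w / 100.0
    return w * np.asarray(v_am) + (1.0 - w) * np.asarray(v_fm)

# e.g. Hweight = blend(H0_AM, H0_FM, 40) and gzi = blend(gzi_AM, gzi_FM, 90)
\end{verbatim}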
\section{Linear Polynomial Fit}
\label{sec:linear_polynomial_fit}
\Cref{fig:fastl2007_r-squared_comparison_wh,fig:fastl2007_r-squared_comparison_wg}
report the values of $R^2$ for AM tones, FM tones, the sum of the $R^2$ values
for both types of tones, the absolute value of the difference of the $R^2$
values of both types of tones, and finally the sum of the sum and the difference.
The $R^2$ values can also be found in tables in the appendix
section at the end of this document.
\myfigurepair%
{fastl2007_r-squared_comparison_wh}
{$R^2$ as a function of $w_h$ for $f_m$ response curves}
{fastl2007_r-squared_comparison_wg}
{$R^2$ as a function of $w_g$ for $f_c$ response curves}
Using the sum of the sum and difference of $R^2$ values (the total curve), the
best possible trade-off is achieved when $w_h = 40$ and $w_g = 90$. As such,
these values will be considered to be the ones that yield the best model fit.
Furthermore, linear polynomial fit plots using the chosen weight values for all
the experimental conditions are presented in
\cref{fig:fastl2007_linear_am,fig:fastl2007_linear_fm}.
\myfigurequad%
{fastl2007_linear_am-fm}
{Modulation frequency}
{fastl2007_linear_am-fc}
{Center frequency}
{fastl2007_linear_am-spl}
{Sound pressure level}
{fastl2007_linear_am-md}
{Modulation depth}
{
\caption{Linear polynomial fit for AM tones response curves}
\label{fig:fastl2007_linear_am}
}
\myfigurequad%
{fastl2007_linear_fm-fm}
{Modulation frequency}
{fastl2007_linear_fm-fc}
{Center frequency}
{fastl2007_linear_fm-spl}
{Sound pressure level}
{fastl2007_linear_fm-df}
{Frequency deviation}
{
\caption{Linear polynomial fit for FM tones response curves}
\label{fig:fastl2007_linear_fm}
}
\clearpage
\section{Results}
\label{sec:results}
The comparison between the results of the model, the experimental data and
Fastl's data is shown in \cref{fig:AM_all_comparison,fig:FM_all_comparison}.
\begin{comparison}
\myfigurequad%
{AM-fm_all_comparison}
{Modulation frequency}
{AM-fc_all_comparison}
{Center frequency}
{AM-SPL_all_comparison}
{Sound pressure level}
{AM-md_all_comparison}
{Modulation depth}
{
\caption{Relative fluctuation strength for AM tones}
\label{fig:AM_all_comparison}
}
\myfigurequad%
{FM-fm_all_comparison}
{Modulation frequency}
{FM-fc_all_comparison}
{Center frequency}
{FM-SPL_all_comparison}
{Sound pressure level}
{FM-df_all_comparison}
{Frequency deviation}
{
\caption{Relative fluctuation strength for FM tones}
\label{fig:FM_all_comparison}
}
\end{comparison}
\section{Discussion}
\label{sec:discussion}
\begin{itemize}
\item For modulation frequency and center frequency, a trade-off must be made
when adjusting the model parameters.
\item It remains unclear how the curves regarding SPL, modulation depth and
frequency deviation can be adjusted.
\end{itemize}
\mybibliography{}
\clearpage
\appendix
\section{Adjusted R-squared and model weights}
\mytablepair{AM-fm}{FM-fm}
\mytablepair{AM-fc}{FM-fc}
\mytablepair{AM-SPL}{FM-SPL}
\mytablepair{AM-md}{FM-df}
\end{document}
| {
"alphanum_fraction": 0.7679640719,
"avg_line_length": 29.9743589744,
"ext": "tex",
"hexsha": "eb5151bdb1f37bb90ccab81bdbdc36cf6dc5e7fd",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-12-31T08:25:23.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-12-31T08:25:23.000Z",
"max_forks_repo_head_hexsha": "0a79518e536497dbe6b63c8560c7c3df97187490",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "rodrigo-garcia-leon/thesis-latex",
"max_forks_repo_path": "topic/fluctuation_strength/model/data_fitting/fastl2007.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "0a79518e536497dbe6b63c8560c7c3df97187490",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "rodrigo-garcia-leon/thesis-latex",
"max_issues_repo_path": "topic/fluctuation_strength/model/data_fitting/fastl2007.tex",
"max_line_length": 82,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "0a79518e536497dbe6b63c8560c7c3df97187490",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "rodrigo-garcia-leon/thesis-latex",
"max_stars_repo_path": "topic/fluctuation_strength/model/data_fitting/fastl2007.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1296,
"size": 4676
} |
\documentclass{article} % For LaTeX2e
\usepackage{nips14submit_e,times}
\usepackage{hyperref}
\usepackage{url}
%\documentstyle[nips14submit_09,times,art10]{article} % For LaTeX 2.09
\title{WikiCat: Automatic Categorization of Wikipedia Pages into Categories}
\author{Myung-ha Jang \& John Foley\\
Center for Intelligent Information Retrieval\\
College of Information and Computer Sciences\\
University of Massachusetts Amherst\\
\texttt{\{mhjang,jfoley\}@cs.umass.edu}
}
\newcommand{\fix}{\marginpar{FIX}}
\newcommand{\new}{\marginpar{NEW}}
%\nipsfinalcopy % Uncomment for camera-ready version
\begin{document}
\maketitle
\begin{abstract}
The abstract should be clever, exciting and short.
\end{abstract}
\section{Introduction}
Researchers love Wikipedia~\cite{wang2014concept,banerjee07,gabrilovich2007computing,meij2012adding,de2014taxonomic, pohl2012classifying}.
The winner of the 2014 Kaggle challenge~\cite{puurula2014kaggle} used an ensemble of many classifiers.
\bibliographystyle{abbrv}
\small\bibliography{cites}
\end{document}
| {
"alphanum_fraction": 0.773480663,
"avg_line_length": 29.3513513514,
"ext": "tex",
"hexsha": "f8c652d0032be5ac4aa577492cf2101349c7463e",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "76bd48fa34bb37ba11dded14e7d3aea6e7648e47",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "jjfiv/wikicat",
"max_forks_repo_path": "cs689paper/paper.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "76bd48fa34bb37ba11dded14e7d3aea6e7648e47",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "jjfiv/wikicat",
"max_issues_repo_path": "cs689paper/paper.tex",
"max_line_length": 139,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "76bd48fa34bb37ba11dded14e7d3aea6e7648e47",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "jjfiv/wikicat",
"max_stars_repo_path": "cs689paper/paper.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 306,
"size": 1086
} |
\documentclass[11pt]{article}
\usepackage[breakable]{tcolorbox}
\usepackage{parskip} % Stop auto-indenting (to mimic markdown behaviour)
% Basic figure setup, for now with no caption control since it's done
% automatically by Pandoc (which extracts  syntax from Markdown).
\usepackage{graphicx}
% Maintain compatibility with old templates. Remove in nbconvert 6.0
\let\Oldincludegraphics\includegraphics
% Ensure that by default, figures have no caption (until we provide a
% proper Figure object with a Caption API and a way to capture that
% in the conversion process - todo).
\usepackage{caption}
\DeclareCaptionFormat{nocaption}{}
\captionsetup{format=nocaption,aboveskip=0pt,belowskip=0pt}
\usepackage{float}
\floatplacement{figure}{H} % forces figures to be placed at the correct location
\usepackage{xcolor} % Allow colors to be defined
\usepackage{enumerate} % Needed for markdown enumerations to work
\usepackage{geometry} % Used to adjust the document margins
\usepackage{amsmath} % Equations
\usepackage{amssymb} % Equations
\usepackage{textcomp} % defines textquotesingle
% Hack from http://tex.stackexchange.com/a/47451/13684:
\AtBeginDocument{%
\def\PYZsq{\textquotesingle}% Upright quotes in Pygmentized code
}
\usepackage{upquote} % Upright quotes for verbatim code
\usepackage{eurosym} % defines \euro
\usepackage{iftex}
\ifPDFTeX
\usepackage[T1]{fontenc}
\IfFileExists{alphabeta.sty}{
\usepackage{alphabeta}
}{
\usepackage[mathletters]{ucs}
\usepackage[utf8x]{inputenc}
}
\else
\usepackage{fontspec}
\usepackage{unicode-math}
\fi
\usepackage{fancyvrb} % verbatim replacement that allows latex
\usepackage{grffile} % extends the file name processing of package graphics
% to support a larger range
\makeatletter % fix for old versions of grffile with XeLaTeX
\@ifpackagelater{grffile}{2019/11/01}
{
% Do nothing on new versions
}
{
\def\Gread@@xetex#1{%
\IfFileExists{"\Gin@base".bb}%
{\Gread@eps{\[email protected]}}%
{\Gread@@xetex@aux#1}%
}
}
\makeatother
\usepackage[Export]{adjustbox} % Used to constrain images to a maximum size
\adjustboxset{max size={0.9\linewidth}{0.9\paperheight}}
% The hyperref package gives us a pdf with properly built
% internal navigation ('pdf bookmarks' for the table of contents,
% internal cross-reference links, web links for URLs, etc.)
\usepackage{hyperref}
% The default LaTeX title has an obnoxious amount of whitespace. By default,
% titling removes some of it. It also provides customization options.
\usepackage{titling}
\usepackage{longtable} % longtable support required by pandoc >1.10
\usepackage{booktabs} % table support for pandoc > 1.12.2
\usepackage{array} % table support for pandoc >= 2.11.3
\usepackage{calc} % table minipage width calculation for pandoc >= 2.11.1
\usepackage[inline]{enumitem} % IRkernel/repr support (it uses the enumerate* environment)
\usepackage[normalem]{ulem} % ulem is needed to support strikethroughs (\sout)
% normalem makes italics be italics, not underlines
\usepackage{mathrsfs}
% Colors for the hyperref package
\definecolor{urlcolor}{rgb}{0,.145,.698}
\definecolor{linkcolor}{rgb}{.71,0.21,0.01}
\definecolor{citecolor}{rgb}{.12,.54,.11}
% ANSI colors
\definecolor{ansi-black}{HTML}{3E424D}
\definecolor{ansi-black-intense}{HTML}{282C36}
\definecolor{ansi-red}{HTML}{E75C58}
\definecolor{ansi-red-intense}{HTML}{B22B31}
\definecolor{ansi-green}{HTML}{00A250}
\definecolor{ansi-green-intense}{HTML}{007427}
\definecolor{ansi-yellow}{HTML}{DDB62B}
\definecolor{ansi-yellow-intense}{HTML}{B27D12}
\definecolor{ansi-blue}{HTML}{208FFB}
\definecolor{ansi-blue-intense}{HTML}{0065CA}
\definecolor{ansi-magenta}{HTML}{D160C4}
\definecolor{ansi-magenta-intense}{HTML}{A03196}
\definecolor{ansi-cyan}{HTML}{60C6C8}
\definecolor{ansi-cyan-intense}{HTML}{258F8F}
\definecolor{ansi-white}{HTML}{C5C1B4}
\definecolor{ansi-white-intense}{HTML}{A1A6B2}
\definecolor{ansi-default-inverse-fg}{HTML}{FFFFFF}
\definecolor{ansi-default-inverse-bg}{HTML}{000000}
% common color for the border for error outputs.
\definecolor{outerrorbackground}{HTML}{FFDFDF}
% commands and environments needed by pandoc snippets
% extracted from the output of `pandoc -s`
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}}
% Add ',fontsize=\small' for more characters per line
\newenvironment{Shaded}{}{}
\newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}}
\newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.56,0.13,0.00}{{#1}}}
\newcommand{\DecValTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\FloatTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\CharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\StringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\CommentTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textit{{#1}}}}
\newcommand{\OtherTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{{#1}}}
\newcommand{\AlertTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}}
\newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.02,0.16,0.49}{{#1}}}
\newcommand{\RegionMarkerTok}[1]{{#1}}
\newcommand{\ErrorTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}}
\newcommand{\NormalTok}[1]{{#1}}
% Additional commands for more recent versions of Pandoc
\newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.53,0.00,0.00}{{#1}}}
\newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.73,0.40,0.53}{{#1}}}
\newcommand{\ImportTok}[1]{{#1}}
\newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.73,0.13,0.13}{\textit{{#1}}}}
\newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
\newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
\newcommand{\VariableTok}[1]{\textcolor[rgb]{0.10,0.09,0.49}{{#1}}}
\newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}}
\newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.40,0.40,0.40}{{#1}}}
\newcommand{\BuiltInTok}[1]{{#1}}
\newcommand{\ExtensionTok}[1]{{#1}}
\newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.74,0.48,0.00}{{#1}}}
\newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.49,0.56,0.16}{{#1}}}
\newcommand{\InformationTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
\newcommand{\WarningTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
% Define a nice break command that doesn't care if a line doesn't already
% exist.
\def\br{\hspace*{\fill} \\* }
% Math Jax compatibility definitions
\def\gt{>}
\def\lt{<}
\let\Oldtex\TeX
\let\Oldlatex\LaTeX
\renewcommand{\TeX}{\textrm{\Oldtex}}
\renewcommand{\LaTeX}{\textrm{\Oldlatex}}
% Document parameters
% Document title
\title{surface\_barrier}
% Pygments definitions
\makeatletter
\def\PY@reset{\let\PY@it=\relax \let\PY@bf=\relax%
\let\PY@ul=\relax \let\PY@tc=\relax%
\let\PY@bc=\relax \let\PY@ff=\relax}
\def\PY@tok#1{\csname PY@tok@#1\endcsname}
\def\PY@toks#1+{\ifx\relax#1\empty\else%
\PY@tok{#1}\expandafter\PY@toks\fi}
\def\PY@do#1{\PY@bc{\PY@tc{\PY@ul{%
\PY@it{\PY@bf{\PY@ff{#1}}}}}}}
\def\PY#1#2{\PY@reset\PY@toks#1+\relax+\PY@do{#2}}
\@namedef{PY@tok@w}{\def\PY@tc##1{\textcolor[rgb]{0.73,0.73,0.73}{##1}}}
\@namedef{PY@tok@c}{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.24,0.48,0.48}{##1}}}
\@namedef{PY@tok@cp}{\def\PY@tc##1{\textcolor[rgb]{0.61,0.40,0.00}{##1}}}
\@namedef{PY@tok@k}{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\@namedef{PY@tok@kp}{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\@namedef{PY@tok@kt}{\def\PY@tc##1{\textcolor[rgb]{0.69,0.00,0.25}{##1}}}
\@namedef{PY@tok@o}{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\@namedef{PY@tok@ow}{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}}
\@namedef{PY@tok@nb}{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\@namedef{PY@tok@nf}{\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\@namedef{PY@tok@nc}{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\@namedef{PY@tok@nn}{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\@namedef{PY@tok@ne}{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.80,0.25,0.22}{##1}}}
\@namedef{PY@tok@nv}{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\@namedef{PY@tok@no}{\def\PY@tc##1{\textcolor[rgb]{0.53,0.00,0.00}{##1}}}
\@namedef{PY@tok@nl}{\def\PY@tc##1{\textcolor[rgb]{0.46,0.46,0.00}{##1}}}
\@namedef{PY@tok@ni}{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.44,0.44,0.44}{##1}}}
\@namedef{PY@tok@na}{\def\PY@tc##1{\textcolor[rgb]{0.41,0.47,0.13}{##1}}}
\@namedef{PY@tok@nt}{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\@namedef{PY@tok@nd}{\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}}
\@namedef{PY@tok@s}{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\@namedef{PY@tok@sd}{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\@namedef{PY@tok@si}{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.64,0.35,0.47}{##1}}}
\@namedef{PY@tok@se}{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.67,0.36,0.12}{##1}}}
\@namedef{PY@tok@sr}{\def\PY@tc##1{\textcolor[rgb]{0.64,0.35,0.47}{##1}}}
\@namedef{PY@tok@ss}{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\@namedef{PY@tok@sx}{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\@namedef{PY@tok@m}{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\@namedef{PY@tok@gh}{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}}
\@namedef{PY@tok@gu}{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.50,0.00,0.50}{##1}}}
\@namedef{PY@tok@gd}{\def\PY@tc##1{\textcolor[rgb]{0.63,0.00,0.00}{##1}}}
\@namedef{PY@tok@gi}{\def\PY@tc##1{\textcolor[rgb]{0.00,0.52,0.00}{##1}}}
\@namedef{PY@tok@gr}{\def\PY@tc##1{\textcolor[rgb]{0.89,0.00,0.00}{##1}}}
\@namedef{PY@tok@ge}{\let\PY@it=\textit}
\@namedef{PY@tok@gs}{\let\PY@bf=\textbf}
\@namedef{PY@tok@gp}{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}}
\@namedef{PY@tok@go}{\def\PY@tc##1{\textcolor[rgb]{0.44,0.44,0.44}{##1}}}
\@namedef{PY@tok@gt}{\def\PY@tc##1{\textcolor[rgb]{0.00,0.27,0.87}{##1}}}
\@namedef{PY@tok@err}{\def\PY@bc##1{{\setlength{\fboxsep}{\string -\fboxrule}\fcolorbox[rgb]{1.00,0.00,0.00}{1,1,1}{\strut ##1}}}}
\@namedef{PY@tok@kc}{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\@namedef{PY@tok@kd}{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\@namedef{PY@tok@kn}{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\@namedef{PY@tok@kr}{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\@namedef{PY@tok@bp}{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\@namedef{PY@tok@fm}{\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\@namedef{PY@tok@vc}{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\@namedef{PY@tok@vg}{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\@namedef{PY@tok@vi}{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\@namedef{PY@tok@vm}{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\@namedef{PY@tok@sa}{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\@namedef{PY@tok@sb}{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\@namedef{PY@tok@sc}{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\@namedef{PY@tok@dl}{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\@namedef{PY@tok@s2}{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\@namedef{PY@tok@sh}{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\@namedef{PY@tok@s1}{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\@namedef{PY@tok@mb}{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\@namedef{PY@tok@mf}{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\@namedef{PY@tok@mh}{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\@namedef{PY@tok@mi}{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\@namedef{PY@tok@il}{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\@namedef{PY@tok@mo}{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\@namedef{PY@tok@ch}{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.24,0.48,0.48}{##1}}}
\@namedef{PY@tok@cm}{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.24,0.48,0.48}{##1}}}
\@namedef{PY@tok@cpf}{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.24,0.48,0.48}{##1}}}
\@namedef{PY@tok@c1}{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.24,0.48,0.48}{##1}}}
\@namedef{PY@tok@cs}{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.24,0.48,0.48}{##1}}}
\def\PYZbs{\char`\\}
\def\PYZus{\char`\_}
\def\PYZob{\char`\{}
\def\PYZcb{\char`\}}
\def\PYZca{\char`\^}
\def\PYZam{\char`\&}
\def\PYZlt{\char`\<}
\def\PYZgt{\char`\>}
\def\PYZsh{\char`\#}
\def\PYZpc{\char`\%}
\def\PYZdl{\char`\$}
\def\PYZhy{\char`\-}
\def\PYZsq{\char`\'}
\def\PYZdq{\char`\"}
\def\PYZti{\char`\~}
% for compatibility with earlier versions
\def\PYZat{@}
\def\PYZlb{[}
\def\PYZrb{]}
\makeatother
% For linebreaks inside Verbatim environment from package fancyvrb.
\makeatletter
\newbox\Wrappedcontinuationbox
\newbox\Wrappedvisiblespacebox
\newcommand*\Wrappedvisiblespace {\textcolor{red}{\textvisiblespace}}
\newcommand*\Wrappedcontinuationsymbol {\textcolor{red}{\llap{\tiny$\m@th\hookrightarrow$}}}
\newcommand*\Wrappedcontinuationindent {3ex }
\newcommand*\Wrappedafterbreak {\kern\Wrappedcontinuationindent\copy\Wrappedcontinuationbox}
% Take advantage of the already applied Pygments mark-up to insert
% potential linebreaks for TeX processing.
% {, <, #, %, $, ' and ": go to next line.
% _, }, ^, &, >, - and ~: stay at end of broken line.
% Use of \textquotesingle for straight quote.
\newcommand*\Wrappedbreaksatspecials {%
\def\PYGZus{\discretionary{\char`\_}{\Wrappedafterbreak}{\char`\_}}%
\def\PYGZob{\discretionary{}{\Wrappedafterbreak\char`\{}{\char`\{}}%
\def\PYGZcb{\discretionary{\char`\}}{\Wrappedafterbreak}{\char`\}}}%
\def\PYGZca{\discretionary{\char`\^}{\Wrappedafterbreak}{\char`\^}}%
\def\PYGZam{\discretionary{\char`\&}{\Wrappedafterbreak}{\char`\&}}%
\def\PYGZlt{\discretionary{}{\Wrappedafterbreak\char`\<}{\char`\<}}%
\def\PYGZgt{\discretionary{\char`\>}{\Wrappedafterbreak}{\char`\>}}%
\def\PYGZsh{\discretionary{}{\Wrappedafterbreak\char`\#}{\char`\#}}%
\def\PYGZpc{\discretionary{}{\Wrappedafterbreak\char`\%}{\char`\%}}%
\def\PYGZdl{\discretionary{}{\Wrappedafterbreak\char`\$}{\char`\$}}%
\def\PYGZhy{\discretionary{\char`\-}{\Wrappedafterbreak}{\char`\-}}%
\def\PYGZsq{\discretionary{}{\Wrappedafterbreak\textquotesingle}{\textquotesingle}}%
\def\PYGZdq{\discretionary{}{\Wrappedafterbreak\char`\"}{\char`\"}}%
\def\PYGZti{\discretionary{\char`\~}{\Wrappedafterbreak}{\char`\~}}%
}
% Some characters . , ; ? ! / are not pygmentized.
% This macro makes them "active" and they will insert potential linebreaks
\newcommand*\Wrappedbreaksatpunct {%
\lccode`\~`\.\lowercase{\def~}{\discretionary{\hbox{\char`\.}}{\Wrappedafterbreak}{\hbox{\char`\.}}}%
\lccode`\~`\,\lowercase{\def~}{\discretionary{\hbox{\char`\,}}{\Wrappedafterbreak}{\hbox{\char`\,}}}%
\lccode`\~`\;\lowercase{\def~}{\discretionary{\hbox{\char`\;}}{\Wrappedafterbreak}{\hbox{\char`\;}}}%
\lccode`\~`\:\lowercase{\def~}{\discretionary{\hbox{\char`\:}}{\Wrappedafterbreak}{\hbox{\char`\:}}}%
\lccode`\~`\?\lowercase{\def~}{\discretionary{\hbox{\char`\?}}{\Wrappedafterbreak}{\hbox{\char`\?}}}%
\lccode`\~`\!\lowercase{\def~}{\discretionary{\hbox{\char`\!}}{\Wrappedafterbreak}{\hbox{\char`\!}}}%
\lccode`\~`\/\lowercase{\def~}{\discretionary{\hbox{\char`\/}}{\Wrappedafterbreak}{\hbox{\char`\/}}}%
\catcode`\.\active
\catcode`\,\active
\catcode`\;\active
\catcode`\:\active
\catcode`\?\active
\catcode`\!\active
\catcode`\/\active
\lccode`\~`\~
}
\makeatother
\let\OriginalVerbatim=\Verbatim
\makeatletter
\renewcommand{\Verbatim}[1][1]{%
%\parskip\z@skip
\sbox\Wrappedcontinuationbox {\Wrappedcontinuationsymbol}%
\sbox\Wrappedvisiblespacebox {\FV@SetupFont\Wrappedvisiblespace}%
\def\FancyVerbFormatLine ##1{\hsize\linewidth
\vtop{\raggedright\hyphenpenalty\z@\exhyphenpenalty\z@
\doublehyphendemerits\z@\finalhyphendemerits\z@
\strut ##1\strut}%
}%
% If the linebreak is at a space, the latter will be displayed as visible
% space at end of first line, and a continuation symbol starts next line.
% Stretch/shrink are however usually zero for typewriter font.
\def\FV@Space {%
\nobreak\hskip\z@ plus\fontdimen3\font minus\fontdimen4\font
\discretionary{\copy\Wrappedvisiblespacebox}{\Wrappedafterbreak}
{\kern\fontdimen2\font}%
}%
% Allow breaks at special characters using \PYG... macros.
\Wrappedbreaksatspecials
% Breaks at punctuation characters . , ; ? ! and / need catcode=\active
\OriginalVerbatim[#1,codes*=\Wrappedbreaksatpunct]%
}
\makeatother
% Exact colors from NB
\definecolor{incolor}{HTML}{303F9F}
\definecolor{outcolor}{HTML}{D84315}
\definecolor{cellborder}{HTML}{CFCFCF}
\definecolor{cellbackground}{HTML}{F7F7F7}
% prompt
\makeatletter
\newcommand{\boxspacing}{\kern\kvtcb@left@rule\kern\kvtcb@boxsep}
\makeatother
\newcommand{\prompt}[4]{
{\ttfamily\llap{{\color{#2}[#3]:\hspace{3pt}#4}}\vspace{-\baselineskip}}
}
% Prevent overflowing lines due to hard-to-break entities
\sloppy
% Setup hyperref package
\hypersetup{
breaklinks=true, % so long urls are correctly broken across lines
colorlinks=true,
urlcolor=urlcolor,
linkcolor=linkcolor,
citecolor=citecolor,
}
% Slightly bigger margins than the latex defaults
\geometry{verbose,tmargin=1in,bmargin=1in,lmargin=1in,rmargin=1in}
\begin{document}
\maketitle
\hypertarget{surface-barrier}{%
\section{Surface Barrier}\label{surface-barrier}}
\hypertarget{functions}{%
\subsection{Functions}\label{functions}}
\begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder]
\prompt{In}{incolor}{1}{\boxspacing}
\begin{Verbatim}[commandchars=\\\{\}]
\PY{c+c1}{\PYZsh{} \PYZpc{}load ../../setup.py}
\PY{l+s+sd}{\PYZdq{}\PYZdq{}\PYZdq{}}
\PY{l+s+sd}{Packages for plotting and other stuff}
\PY{l+s+sd}{version: 5.0}
\PY{l+s+sd}{author: Riasat}
\PY{l+s+sd}{\PYZdq{}\PYZdq{}\PYZdq{}}
\PY{c+c1}{\PYZsh{} uncomment the below line to use interactive plots}
\PY{c+c1}{\PYZsh{} \PYZpc{}matplotlib widget}
\PY{c+c1}{\PYZsh{} data loading}
\PY{k+kn}{import} \PY{n+nn}{pandas} \PY{k}{as} \PY{n+nn}{pd}
\PY{c+c1}{\PYZsh{} data manipulation}
\PY{k+kn}{import} \PY{n+nn}{pwlf}
\PY{k+kn}{import} \PY{n+nn}{numpy} \PY{k}{as} \PY{n+nn}{np}
\PY{k+kn}{from} \PY{n+nn}{scipy}\PY{n+nn}{.}\PY{n+nn}{interpolate} \PY{k+kn}{import} \PY{n}{interp1d}\PY{p}{,} \PY{n}{UnivariateSpline}
\PY{k+kn}{from} \PY{n+nn}{scipy}\PY{n+nn}{.}\PY{n+nn}{signal} \PY{k+kn}{import} \PY{n}{find\PYZus{}peaks}
\PY{k+kn}{from} \PY{n+nn}{scipy} \PY{k+kn}{import} \PY{n}{optimize}
\PY{c+c1}{\PYZsh{} plotting tools}
\PY{k+kn}{import} \PY{n+nn}{matplotlib}\PY{n+nn}{.}\PY{n+nn}{pyplot} \PY{k}{as} \PY{n+nn}{plt}
\PY{c+c1}{\PYZsh{} extra tweaks}
\PY{k+kn}{import} \PY{n+nn}{warnings}
\PY{n}{warnings}\PY{o}{.}\PY{n}{filterwarnings}\PY{p}{(}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{ignore}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{c+c1}{\PYZsh{} plot tweaks}
\PY{n}{plt}\PY{o}{.}\PY{n}{style}\PY{o}{.}\PY{n}{use}\PY{p}{(}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{seaborn\PYZhy{}poster}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{pd}\PY{o}{.}\PY{n}{options}\PY{o}{.}\PY{n}{display}\PY{o}{.}\PY{n}{max\PYZus{}columns} \PY{o}{=} \PY{k+kc}{None}
\PY{n}{pd}\PY{o}{.}\PY{n}{options}\PY{o}{.}\PY{n}{display}\PY{o}{.}\PY{n}{float\PYZus{}format} \PY{o}{=} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+si}{\PYZob{}:.5f\PYZcb{}}\PY{l+s+s2}{\PYZdq{}}\PY{o}{.}\PY{n}{format}
\PY{c+c1}{\PYZsh{} function for extrapolation}
\PY{k}{def} \PY{n+nf}{extrapolate1d}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{n}{y}\PY{p}{)}\PY{p}{:}
\PY{n}{f} \PY{o}{=} \PY{n}{interp1d}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{n}{y}\PY{p}{,} \PY{n}{kind}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{linear}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{fill\PYZus{}value}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{extrapolate}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{a} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{arange}\PY{p}{(}\PY{l+m+mi}{0}\PY{p}{,} \PY{n}{x}\PY{p}{[}\PY{n+nb}{len}\PY{p}{(}\PY{n}{x}\PY{p}{)} \PY{o}{\PYZhy{}} \PY{l+m+mi}{1}\PY{p}{]}\PY{p}{,} \PY{l+m+mf}{0.001}\PY{p}{)}
\PY{n}{b} \PY{o}{=} \PY{n}{f}\PY{p}{(}\PY{n}{a}\PY{p}{)}
\PY{k}{return} \PY{n}{a}\PY{p}{,} \PY{n}{b}
\PY{c+c1}{\PYZsh{} function for interpolation}
\PY{k}{def} \PY{n+nf}{interpolate1d}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{n}{y}\PY{p}{)}\PY{p}{:}
\PY{n}{f} \PY{o}{=} \PY{n}{interp1d}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{n}{y}\PY{p}{,} \PY{n}{kind}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{linear}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{fill\PYZus{}value}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{extrapolate}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{a} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{arange}\PY{p}{(}\PY{n}{x}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{,} \PY{n}{x}\PY{p}{[}\PY{n+nb}{len}\PY{p}{(}\PY{n}{x}\PY{p}{)} \PY{o}{\PYZhy{}} \PY{l+m+mi}{1}\PY{p}{]}\PY{p}{,} \PY{l+m+mf}{0.001}\PY{p}{)}
\PY{n}{b} \PY{o}{=} \PY{n}{f}\PY{p}{(}\PY{n}{a}\PY{p}{)}
\PY{k}{return} \PY{n}{a}\PY{p}{,} \PY{n}{b}
\PY{c+c1}{\PYZsh{} function for interpolation}
\PY{k}{def} \PY{n+nf}{interpolate2d}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{n}{y}\PY{p}{)}\PY{p}{:}
\PY{n}{f} \PY{o}{=} \PY{n}{interp1d}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{n}{y}\PY{p}{,} \PY{n}{kind}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{quadratic}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{fill\PYZus{}value}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{extrapolate}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{a} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{arange}\PY{p}{(}\PY{n}{x}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{,} \PY{n}{x}\PY{p}{[}\PY{n+nb}{len}\PY{p}{(}\PY{n}{x}\PY{p}{)} \PY{o}{\PYZhy{}} \PY{l+m+mi}{1}\PY{p}{]}\PY{p}{,} \PY{l+m+mf}{0.001}\PY{p}{)}
\PY{n}{b} \PY{o}{=} \PY{n}{f}\PY{p}{(}\PY{n}{a}\PY{p}{)}
\PY{k}{return} \PY{n}{a}\PY{p}{,} \PY{n}{b}
\PY{c+c1}{\PYZsh{} function for interpolation}
\PY{k}{def} \PY{n+nf}{interpolate3d}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{n}{y}\PY{p}{)}\PY{p}{:}
\PY{n}{f} \PY{o}{=} \PY{n}{interp1d}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{n}{y}\PY{p}{,} \PY{n}{kind}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{cubic}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{fill\PYZus{}value}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{extrapolate}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{a} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{arange}\PY{p}{(}\PY{n}{x}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{,} \PY{n}{x}\PY{p}{[}\PY{n+nb}{len}\PY{p}{(}\PY{n}{x}\PY{p}{)} \PY{o}{\PYZhy{}} \PY{l+m+mi}{1}\PY{p}{]}\PY{p}{,} \PY{l+m+mf}{0.001}\PY{p}{)}
\PY{n}{b} \PY{o}{=} \PY{n}{f}\PY{p}{(}\PY{n}{a}\PY{p}{)}
\PY{k}{return} \PY{n}{a}\PY{p}{,} \PY{n}{b}
\PY{c+c1}{\PYZsh{} function for polynomial fitting}
\PY{k}{def} \PY{n+nf}{polfit}\PY{p}{(}\PY{n}{a}\PY{p}{,} \PY{n}{b}\PY{p}{,} \PY{n}{c}\PY{p}{)}\PY{p}{:}
\PY{n}{z} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{polyfit}\PY{p}{(}\PY{n}{a}\PY{p}{,} \PY{n}{b}\PY{p}{,} \PY{n}{c}\PY{p}{)}
\PY{n}{f} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{poly1d}\PY{p}{(}\PY{n}{z}\PY{p}{)}
\PY{n}{x} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{arange}\PY{p}{(}\PY{n}{a}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{,} \PY{n}{a}\PY{p}{[}\PY{n+nb}{len}\PY{p}{(}\PY{n}{a}\PY{p}{)} \PY{o}{\PYZhy{}} \PY{l+m+mi}{1}\PY{p}{]}\PY{p}{,} \PY{l+m+mf}{0.001}\PY{p}{)}
\PY{n}{y} \PY{o}{=} \PY{n}{f}\PY{p}{(}\PY{n}{x}\PY{p}{)}
\PY{k}{return} \PY{n}{x}\PY{p}{,} \PY{n}{y}
\PY{c+c1}{\PYZsh{} function for piecewise linear fit}
\PY{k}{def} \PY{n+nf}{picewise\PYZus{}linear\PYZus{}fit}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{n}{y}\PY{p}{,} \PY{n}{segments}\PY{p}{)}\PY{p}{:}
\PY{n}{my\PYZus{}pwlf} \PY{o}{=} \PY{n}{pwlf}\PY{o}{.}\PY{n}{PiecewiseLinFit}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{n}{y}\PY{p}{)} \PY{c+c1}{\PYZsh{} fit my data}
\PY{n}{res} \PY{o}{=} \PY{n}{my\PYZus{}pwlf}\PY{o}{.}\PY{n}{fit}\PY{p}{(}\PY{n}{segments}\PY{p}{)} \PY{c+c1}{\PYZsh{} fit the data for n line segments}
\PY{c+c1}{\PYZsh{} slopes = myPWLF.calc\PYZus{}slopes() \PYZsh{} calculate slopes}
\PY{c+c1}{\PYZsh{} predict for the determined points}
\PY{n}{xHat} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{linspace}\PY{p}{(}\PY{n+nb}{min}\PY{p}{(}\PY{n}{x}\PY{p}{)}\PY{p}{,} \PY{n+nb}{max}\PY{p}{(}\PY{n}{x}\PY{p}{)}\PY{p}{,} \PY{n}{num}\PY{o}{=}\PY{l+m+mi}{10000}\PY{p}{)}
\PY{n}{yHat} \PY{o}{=} \PY{n}{my\PYZus{}pwlf}\PY{o}{.}\PY{n}{predict}\PY{p}{(}\PY{n}{xHat}\PY{p}{)}
\PY{c+c1}{\PYZsh{} calculate statistics}
\PY{c+c1}{\PYZsh{} p = myPWLF.p\PYZus{}values(method=\PYZdq{}non\PYZhy{}linear\PYZdq{}, step\PYZus{}size=1e\PYZhy{}4) \PYZsh{} p\PYZhy{}values}
\PY{c+c1}{\PYZsh{} se = myPWLF.se \PYZsh{} standard errors}
\PY{k}{return} \PY{n}{xHat}\PY{p}{,} \PY{n}{yHat}
\PY{c+c1}{\PYZsh{} curve fit}
\PY{k}{def} \PY{n+nf}{cur\PYZus{}fit}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{n}{y}\PY{p}{)}\PY{p}{:}
\PY{n}{func} \PY{o}{=} \PY{k}{lambda} \PY{n}{t}\PY{p}{,} \PY{n}{a}\PY{p}{,} \PY{n}{c}\PY{p}{,} \PY{n}{d}\PY{p}{:} \PY{n}{a} \PY{o}{*} \PY{n}{np}\PY{o}{.}\PY{n}{log}\PY{p}{(}\PY{n}{t} \PY{o}{+} \PY{n}{c}\PY{p}{)} \PY{o}{+} \PY{n}{d}
\PY{n}{popt}\PY{p}{,} \PY{n}{pcov} \PY{o}{=} \PY{n}{optimize}\PY{o}{.}\PY{n}{curve\PYZus{}fit}\PY{p}{(}\PY{n}{func}\PY{p}{,} \PY{n}{x}\PY{p}{,} \PY{n}{y}\PY{p}{)} \PY{c+c1}{\PYZsh{} type: ignore}
\PY{n}{xx} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{arange}\PY{p}{(}\PY{n}{x}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{,} \PY{n}{x}\PY{p}{[}\PY{n+nb}{len}\PY{p}{(}\PY{n}{x}\PY{p}{)} \PY{o}{\PYZhy{}} \PY{l+m+mi}{1}\PY{p}{]}\PY{p}{,} \PY{l+m+mf}{0.001}\PY{p}{)}
\PY{n}{yy} \PY{o}{=} \PY{n}{func}\PY{p}{(}\PY{n}{xx}\PY{p}{,} \PY{o}{*}\PY{n}{popt}\PY{p}{)}
\PY{k}{return} \PY{n}{xx}\PY{p}{,} \PY{n}{yy}
\PY{k}{def} \PY{n+nf}{estimate\PYZus{}coef}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{n}{y}\PY{p}{)}\PY{p}{:}
\PY{c+c1}{\PYZsh{} number of observations/points}
\PY{n}{n} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{size}\PY{p}{(}\PY{n}{x}\PY{p}{)}
\PY{c+c1}{\PYZsh{} mean of x and y vector}
\PY{n}{m\PYZus{}x} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{n}{x}\PY{p}{)}
\PY{n}{m\PYZus{}y} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{n}{y}\PY{p}{)}
\PY{c+c1}{\PYZsh{} calculating cross\PYZhy{}deviation and deviation about x}
\PY{n}{SS\PYZus{}xy} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{n}{y} \PY{o}{*} \PY{n}{x}\PY{p}{)}
\PY{n}{SS\PYZus{}xx} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{n}{x} \PY{o}{*} \PY{n}{x}\PY{p}{)}
\PY{n}{deno} \PY{o}{=} \PY{n}{n} \PY{o}{*} \PY{n}{SS\PYZus{}xx} \PY{o}{\PYZhy{}} \PY{n}{m\PYZus{}x} \PY{o}{*} \PY{n}{m\PYZus{}x}
\PY{c+c1}{\PYZsh{} calculating regression coefficients}
\PY{n}{b} \PY{o}{=} \PY{p}{(}\PY{n}{n} \PY{o}{*} \PY{n}{SS\PYZus{}xy} \PY{o}{\PYZhy{}} \PY{n}{m\PYZus{}x} \PY{o}{*} \PY{n}{m\PYZus{}y}\PY{p}{)} \PY{o}{/} \PY{n}{deno}
\PY{n}{a} \PY{o}{=} \PY{p}{(}\PY{n}{m\PYZus{}y} \PY{o}{*} \PY{n}{SS\PYZus{}xx} \PY{o}{\PYZhy{}} \PY{n}{m\PYZus{}x} \PY{o}{*} \PY{n}{SS\PYZus{}xy}\PY{p}{)} \PY{o}{/} \PY{n}{deno}
\PY{k}{return} \PY{p}{(}\PY{n}{a}\PY{p}{,} \PY{n}{b}\PY{p}{)}
\end{Verbatim}
\end{tcolorbox}
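The helper \texttt{estimate\_coef} above is simply the closed-form ordinary least-squares fit of a line $y = a + b\,x$. Writing $n$ for the number of points, $S_x=\sum_i x_i$, $S_y=\sum_i y_i$, $S_{xx}=\sum_i x_i^2$ and $S_{xy}=\sum_i x_i y_i$ (these correspond to the code's \texttt{n}, \texttt{m\_x}, \texttt{m\_y}, \texttt{SS\_xx} and \texttt{SS\_xy}; the $S$ notation is introduced here only for readability), the returned coefficients are
\[
b = \frac{n\,S_{xy} - S_x\,S_y}{n\,S_{xx} - S_x^{2}},
\qquad
a = \frac{S_y\,S_{xx} - S_x\,S_{xy}}{n\,S_{xx} - S_x^{2}},
\]
and this is the form used to check the regression tables further below.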
\hypertarget{data}{%
\subsection{Data}\label{data}}
\begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder]
\prompt{In}{incolor}{2}{\boxspacing}
\begin{Verbatim}[commandchars=\\\{\}]
\PY{n}{file\PYZus{}name} \PY{o}{=} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{data\PYZus{}surface.xlsx}\PY{l+s+s2}{\PYZdq{}}
\PY{n}{res\PYZus{}name} \PY{o}{=} \PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Am\PYZhy{}241}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Pu\PYZhy{}239}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Cm\PYZhy{}244}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]}
\PY{c+c1}{\PYZsh{} calibration data}
\PY{n}{data\PYZus{}cesium\PYZus{}calib} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{read\PYZus{}excel}\PY{p}{(}\PY{n}{file\PYZus{}name}\PY{p}{,} \PY{n}{sheet\PYZus{}name}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{calibration}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{peak\PYZus{}channel} \PY{o}{=} \PY{n}{data\PYZus{}cesium\PYZus{}calib}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{calib\PYZus{}channel}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]}
\PY{n}{known\PYZus{}energy} \PY{o}{=} \PY{n}{data\PYZus{}cesium\PYZus{}calib}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{calib\PYZus{}energy}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]}
\PY{n}{data\PYZus{}am} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{read\PYZus{}excel}\PY{p}{(}\PY{n}{file\PYZus{}name}\PY{p}{,} \PY{n}{sheet\PYZus{}name}\PY{o}{=}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{thick}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
\PY{n}{left\PYZus{}air} \PY{o}{=} \PY{n}{data\PYZus{}am}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{left\PYZus{}air}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{n}{right\PYZus{}air} \PY{o}{=} \PY{n}{data\PYZus{}am}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{right\PYZus{}air}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{n}{left\PYZus{}vacc} \PY{o}{=} \PY{n}{data\PYZus{}am}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{left\PYZus{}vacc}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{n}{right\PYZus{}vacc} \PY{o}{=} \PY{n}{data\PYZus{}am}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{right\PYZus{}vacc}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{n}{vaccum\PYZus{}peak} \PY{o}{=} \PY{n}{data\PYZus{}am}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{vaccum\PYZus{}peak}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{n}{air\PYZus{}peak} \PY{o}{=} \PY{n}{data\PYZus{}am}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{air\PYZus{}peak}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{n}{thickness} \PY{o}{=} \PY{n}{data\PYZus{}am}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{thickness}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{n+nb}{print}\PY{p}{(}\PY{n}{data\PYZus{}cesium\PYZus{}calib}\PY{p}{)}
\end{Verbatim}
\end{tcolorbox}
\begin{Verbatim}[commandchars=\\\{\}]
calib\_channel calib\_energy
0 1673 5.49000
1 1575 5.16000
2 1737 5.81000
\end{Verbatim}
\hypertarget{calibration}{%
\subsection{Calibration}\label{calibration}}
\begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder]
\prompt{In}{incolor}{3}{\boxspacing}
\begin{Verbatim}[commandchars=\\\{\}]
\PY{c+c1}{\PYZsh{} extrapolated points}
\PY{n}{peak\PYZus{}channel\PYZus{}fit}\PY{p}{,} \PY{n}{known\PYZus{}energy\PYZus{}fit} \PY{o}{=} \PY{n}{polfit}\PY{p}{(}\PY{n}{peak\PYZus{}channel}\PY{p}{,} \PY{n}{known\PYZus{}energy}\PY{p}{,} \PY{l+m+mi}{1}\PY{p}{)}
\PY{n}{cal\PYZus{}chan\PYZus{}ext}\PY{p}{,} \PY{n}{cal\PYZus{}eng\PYZus{}ext} \PY{o}{=} \PY{n}{extrapolate1d}\PY{p}{(}\PY{n}{peak\PYZus{}channel\PYZus{}fit}\PY{p}{,} \PY{n}{known\PYZus{}energy\PYZus{}fit}\PY{p}{)}
\PY{n}{ckt} \PY{o}{=} \PY{p}{[}\PY{n}{item} \PY{k}{for} \PY{n}{item} \PY{o+ow}{in} \PY{n}{cal\PYZus{}eng\PYZus{}ext} \PY{k}{if} \PY{n}{item} \PY{o}{\PYZgt{}}\PY{o}{=} \PY{l+m+mi}{0}\PY{p}{]}
\PY{n}{plt}\PY{o}{.}\PY{n}{style}\PY{o}{.}\PY{n}{use}\PY{p}{(}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{seaborn\PYZhy{}poster}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{figure}\PY{p}{(}\PY{n}{figsize}\PY{o}{=}\PY{p}{(}\PY{l+m+mi}{15}\PY{p}{,} \PY{l+m+mi}{8}\PY{p}{)}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{title}\PY{p}{(}\PY{l+s+sa}{f}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Calibration curve}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{xlabel}\PY{p}{(}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Channel Number(V)}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{ylabel}\PY{p}{(}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Energy of element(MeV)}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{cal\PYZus{}chan\PYZus{}ext}\PY{p}{[}\PY{l+m+mi}{275635}\PY{p}{:}\PY{p}{]}\PY{p}{,} \PY{n}{ckt}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{\PYZhy{}}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{label}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{fitted curve}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{k}{for} \PY{n}{i} \PY{o+ow}{in} \PY{n+nb}{range}\PY{p}{(}\PY{n+nb}{len}\PY{p}{(}\PY{n}{res\PYZus{}name}\PY{p}{)}\PY{p}{)}\PY{p}{:}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{peak\PYZus{}channel}\PY{p}{[}\PY{n}{i}\PY{p}{]}\PY{p}{,} \PY{n}{known\PYZus{}energy}\PY{p}{[}\PY{n}{i}\PY{p}{]}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{o}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{label}\PY{o}{=}\PY{n}{res\PYZus{}name}\PY{p}{[}\PY{n}{i}\PY{p}{]}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{annotate}\PY{p}{(}\PY{l+s+sa}{f}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{(}\PY{l+s+si}{\PYZob{}}\PY{n}{peak\PYZus{}channel}\PY{p}{[}\PY{n}{i}\PY{p}{]}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{, }\PY{l+s+si}{\PYZob{}}\PY{n}{known\PYZus{}energy}\PY{p}{[}\PY{n}{i}\PY{p}{]}\PY{l+s+si}{:}\PY{l+s+s2}{.3f}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{)}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{xy}\PY{o}{=}\PY{p}{(}\PY{n}{peak\PYZus{}channel}\PY{p}{[}\PY{n}{i}\PY{p}{]}\PY{o}{\PYZhy{}}\PY{l+m+mi}{200}\PY{p}{,}\PY{n}{known\PYZus{}energy}\PY{p}{[}\PY{n}{i}\PY{p}{]}\PY{o}{+}\PY{l+m+mf}{0.05}\PY{p}{)}\PY{p}{,} \PY{n}{fontsize}\PY{o}{=}\PY{l+m+mi}{14}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{legend}\PY{p}{(}\PY{n}{loc}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{upper left}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{grid}\PY{p}{(}\PY{n}{alpha}\PY{o}{=}\PY{l+m+mf}{0.3}\PY{p}{,} \PY{n}{which}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{major}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{minorticks\PYZus{}on}\PY{p}{(}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{grid}\PY{p}{(}\PY{n}{alpha}\PY{o}{=}\PY{l+m+mf}{0.2}\PY{p}{,} \PY{n}{which}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{minor}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{ls}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{\PYZhy{}\PYZhy{}}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{show}\PY{p}{(}\PY{p}{)}
\end{Verbatim}
\end{tcolorbox}
\begin{center}
\adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{surface_barrier_files/surface_barrier_6_0.png}
\end{center}
{ \hspace*{\fill} \\}
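For reference, the degree-one fit drawn above corresponds to an approximately linear channel-to-energy calibration. A least-squares fit through the three calibration points $(1575, 5.16)$, $(1673, 5.49)$ and $(1737, 5.81)$ from the table in the Data section gives, with rounded coefficients quoted here only as an indication (the notebook itself works with the interpolated arrays rather than an explicit slope and intercept),
\[
E(c) \approx 3.96\times 10^{-3}\,c - 1.09 ,
\]
where $c$ is the channel number, the slope is in MeV per channel and the intercept is in MeV.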
\hypertarget{fwhm}{%
\subsection{FWHM}\label{fwhm}}
\begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder]
\prompt{In}{incolor}{4}{\boxspacing}
\begin{Verbatim}[commandchars=\\\{\}]
\PY{c+c1}{\PYZsh{} air energy from calibration curve}
\PY{n}{peak\PYZus{}energy\PYZus{}air} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{interp}\PY{p}{(}\PY{n}{air\PYZus{}peak}\PY{p}{,} \PY{n}{cal\PYZus{}chan\PYZus{}ext}\PY{p}{,} \PY{n}{cal\PYZus{}eng\PYZus{}ext}\PY{p}{)}
\PY{n}{peak\PYZus{}energy\PYZus{}vaccum} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{interp}\PY{p}{(}\PY{n}{vaccum\PYZus{}peak}\PY{p}{,} \PY{n}{cal\PYZus{}chan\PYZus{}ext}\PY{p}{,} \PY{n}{cal\PYZus{}eng\PYZus{}ext}\PY{p}{)}
\PY{n}{data\PYZus{}am}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{energy air}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{peak\PYZus{}energy\PYZus{}air}
\PY{n}{data\PYZus{}am}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{energy vaccum}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{peak\PYZus{}energy\PYZus{}vaccum}
\PY{c+c1}{\PYZsh{} print(data\PYZus{}am)}
\end{Verbatim}
\end{tcolorbox}
\hypertarget{thickness-vs.-energy}{%
\subsection{Thickness vs.~energy}\label{thickness-vs.-energy}}
\begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder]
\prompt{In}{incolor}{5}{\boxspacing}
\begin{Verbatim}[commandchars=\\\{\}]
\PY{c+c1}{\PYZsh{} observations}
\PY{n}{xe} \PY{o}{=} \PY{n}{thickness}
\PY{n}{ye\PYZus{}air} \PY{o}{=} \PY{n}{peak\PYZus{}energy\PYZus{}air}
\PY{n}{ye\PYZus{}vac} \PY{o}{=} \PY{n}{peak\PYZus{}energy\PYZus{}vaccum}
\PY{c+c1}{\PYZsh{} estimating coefficients}
\PY{n}{be\PYZus{}air} \PY{o}{=} \PY{n}{estimate\PYZus{}coef}\PY{p}{(}\PY{n}{xe}\PY{p}{,} \PY{n}{ye\PYZus{}air}\PY{p}{)}
\PY{n}{be\PYZus{}vac} \PY{o}{=} \PY{n}{estimate\PYZus{}coef}\PY{p}{(}\PY{n}{xe}\PY{p}{,} \PY{n}{ye\PYZus{}vac}\PY{p}{)}
\PY{n+nb}{print}\PY{p}{(}
\PY{l+s+sa}{f}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Estimated coefficients (Air): }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ a = }\PY{l+s+si}{\PYZob{}}\PY{n}{be\PYZus{}air}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{l+s+si}{:}\PY{l+s+s2}{.3f}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ b = }\PY{l+s+si}{\PYZob{}}\PY{n}{be\PYZus{}air}\PY{p}{[}\PY{l+m+mi}{1}\PY{p}{]}\PY{l+s+si}{:}\PY{l+s+s2}{.3f}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}}
\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{Estimated coefficients (Vaccum): }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ a = }\PY{l+s+si}{\PYZob{}}\PY{n}{be\PYZus{}vac}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{l+s+si}{:}\PY{l+s+s2}{.3f}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ b = }\PY{l+s+si}{\PYZob{}}\PY{n}{be\PYZus{}vac}\PY{p}{[}\PY{l+m+mi}{1}\PY{p}{]}\PY{l+s+si}{:}\PY{l+s+s2}{.3f}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{\PYZdq{}}
\PY{p}{)}
\PY{c+c1}{\PYZsh{} predicted response vector}
\PY{n}{ye\PYZus{}pred\PYZus{}air} \PY{o}{=} \PY{n}{be\PYZus{}air}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]} \PY{o}{+} \PY{n}{be\PYZus{}air}\PY{p}{[}\PY{l+m+mi}{1}\PY{p}{]} \PY{o}{*} \PY{n}{xe}
\PY{n}{ye\PYZus{}pred\PYZus{}vac} \PY{o}{=} \PY{n}{be\PYZus{}vac}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]} \PY{o}{+} \PY{n}{be\PYZus{}vac}\PY{p}{[}\PY{l+m+mi}{1}\PY{p}{]} \PY{o}{*} \PY{n}{xe}
\end{Verbatim}
\end{tcolorbox}
\begin{Verbatim}[commandchars=\\\{\}]
Estimated coefficients (Air):
a = 4.970
b = -0.230
Estimated coefficients (Vaccum):
a = 5.266
b = -0.218
\end{Verbatim}
\begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder]
\prompt{In}{incolor}{6}{\boxspacing}
\begin{Verbatim}[commandchars=\\\{\}]
\PY{c+c1}{\PYZsh{} thickness\PYZus{}fitted\PYZus{}air, peak\PYZus{}energy\PYZus{}air\PYZus{}fitted = polfit(thickness, peak\PYZus{}energy\PYZus{}air, 1)}
\PY{c+c1}{\PYZsh{} thickness\PYZus{}fitted\PYZus{}vaccum , peak\PYZus{}energy\PYZus{}vaccum\PYZus{}fitted = polfit(thickness, peak\PYZus{}energy\PYZus{}vaccum, 1)}
\PY{n}{plt}\PY{o}{.}\PY{n}{style}\PY{o}{.}\PY{n}{use}\PY{p}{(}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{seaborn\PYZhy{}poster}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{figure}\PY{p}{(}\PY{n}{figsize}\PY{o}{=}\PY{p}{(}\PY{l+m+mi}{15}\PY{p}{,} \PY{l+m+mi}{8}\PY{p}{)}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{title}\PY{p}{(}\PY{l+s+sa}{f}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Thickness vs. energy curve of Am\PYZhy{}241}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{xlabel}\PY{p}{(}\PY{l+s+sa}{r}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Thickness (\PYZdl{}}\PY{l+s+s2}{\PYZbs{}}\PY{l+s+s2}{mu m\PYZdl{})}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{ylabel}\PY{p}{(}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Energy (MeV)}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{xe}\PY{p}{,} \PY{n}{ye\PYZus{}pred\PYZus{}air}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{\PYZhy{}}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{label}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{fitted curve for air}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{thickness}\PY{p}{,} \PY{n}{peak\PYZus{}energy\PYZus{}air}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{o}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{label}\PY{o}{=}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{air data points}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{xe}\PY{p}{,} \PY{n}{ye\PYZus{}pred\PYZus{}vac}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{\PYZhy{}}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{label}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{fitted curve for vaccum}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{thickness}\PY{p}{,} \PY{n}{peak\PYZus{}energy\PYZus{}vaccum}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{ks}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{label}\PY{o}{=}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{vaccum data points}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{legend}\PY{p}{(}\PY{n}{loc}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{upper right}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{grid}\PY{p}{(}\PY{n}{alpha}\PY{o}{=}\PY{l+m+mf}{0.3}\PY{p}{,} \PY{n}{which}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{major}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{minorticks\PYZus{}on}\PY{p}{(}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{grid}\PY{p}{(}\PY{n}{alpha}\PY{o}{=}\PY{l+m+mf}{0.2}\PY{p}{,} \PY{n}{which}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{minor}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{ls}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{\PYZhy{}\PYZhy{}}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{show}\PY{p}{(}\PY{p}{)}
\end{Verbatim}
\end{tcolorbox}
\begin{center}
\adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{surface_barrier_files/surface_barrier_11_0.png}
\end{center}
{ \hspace*{\fill} \\}
\hypertarget{thickness-vs.-fwhm}{%
\subsection{Thickness vs.~FWHM}\label{thickness-vs.-fwhm}}
\begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder]
\prompt{In}{incolor}{7}{\boxspacing}
\begin{Verbatim}[commandchars=\\\{\}]
\PY{n}{left\PYZus{}air\PYZus{}energy} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{interp}\PY{p}{(}\PY{n}{left\PYZus{}air}\PY{p}{,} \PY{n}{cal\PYZus{}chan\PYZus{}ext}\PY{p}{,} \PY{n}{cal\PYZus{}eng\PYZus{}ext}\PY{p}{)}
\PY{n}{right\PYZus{}air\PYZus{}energy} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{interp}\PY{p}{(}\PY{n}{right\PYZus{}air}\PY{p}{,} \PY{n}{cal\PYZus{}chan\PYZus{}ext}\PY{p}{,} \PY{n}{cal\PYZus{}eng\PYZus{}ext}\PY{p}{)}
\PY{n}{data\PYZus{}am}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{left\PYZus{}air\PYZus{}mev}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{left\PYZus{}air\PYZus{}energy}
\PY{n}{data\PYZus{}am}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{right\PYZus{}air\PYZus{}mev}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{right\PYZus{}air\PYZus{}energy}
\PY{n}{fwhm\PYZus{}air\PYZus{}mev} \PY{o}{=} \PY{n+nb}{abs}\PY{p}{(}\PY{n}{left\PYZus{}air\PYZus{}energy} \PY{o}{\PYZhy{}} \PY{n}{right\PYZus{}air\PYZus{}energy}\PY{p}{)}
\PY{n}{left\PYZus{}vaccum\PYZus{}energy} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{interp}\PY{p}{(}\PY{n}{left\PYZus{}vacc}\PY{p}{,} \PY{n}{cal\PYZus{}chan\PYZus{}ext}\PY{p}{,} \PY{n}{cal\PYZus{}eng\PYZus{}ext}\PY{p}{)}
\PY{n}{right\PYZus{}vaccum\PYZus{}energy} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{interp}\PY{p}{(}\PY{n}{right\PYZus{}vacc}\PY{p}{,} \PY{n}{cal\PYZus{}chan\PYZus{}ext}\PY{p}{,} \PY{n}{cal\PYZus{}eng\PYZus{}ext}\PY{p}{)}
\PY{n}{data\PYZus{}am}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{left\PYZus{}vac\PYZus{}mev}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{left\PYZus{}vaccum\PYZus{}energy}
\PY{n}{data\PYZus{}am}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{right\PYZus{}vac\PYZus{}mev}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{right\PYZus{}vaccum\PYZus{}energy}
\PY{n}{fwhm\PYZus{}vaccum\PYZus{}mev} \PY{o}{=} \PY{n+nb}{abs}\PY{p}{(}\PY{n}{left\PYZus{}vaccum\PYZus{}energy} \PY{o}{\PYZhy{}} \PY{n}{right\PYZus{}vaccum\PYZus{}energy}\PY{p}{)}
\PY{n}{data\PYZus{}am}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{fwhm\PYZus{}air\PYZus{}mev}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{fwhm\PYZus{}air\PYZus{}mev}
\PY{n}{data\PYZus{}am}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{fwhm\PYZus{}vaccum\PYZus{}mev}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{fwhm\PYZus{}vaccum\PYZus{}mev}
\PY{n+nb}{print}\PY{p}{(}\PY{n}{data\PYZus{}am}\PY{p}{)}
\end{Verbatim}
\end{tcolorbox}
\begin{Verbatim}[commandchars=\\\{\}]
thickness vaccum\_peak air\_peak left\_vacc right\_vacc left\_air \textbackslash{}
0 2 1506 1425 1345 1568 1326
1 4 1367 1303 1206 1446 1138
2 8 1162 1011 862 1255 798
3 10 1090 1001 779 1177 724
4 12 925 829 575 1024 537
right\_air energy air energy vaccum left\_air\_mev right\_air\_mev \textbackslash{}
0 1491 4.54981 4.87045 4.15792 4.81108
1 1372 4.06687 4.32022 3.41371 4.34001
2 1094 2.91098 3.50872 2.06781 3.23953
3 1093 2.87139 3.22370 1.77487 3.23558
4 925 2.19052 2.57054 1.03463 2.57054
left\_vac\_mev right\_vac\_mev fwhm\_air\_mev fwhm\_vaccum\_mev
0 4.23313 5.11588 0.65316 0.88275
1 3.68289 4.63294 0.92630 0.95005
2 2.32115 3.87686 1.17173 1.55571
3 1.99259 3.56809 1.46070 1.57550
4 1.18505 2.96244 1.53591 1.77739
\end{Verbatim}
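As a quick consistency check of the table above, each FWHM is just the energy width between the left and right half-maximum points obtained from the calibration. For the thinnest foil ($2\,\mu$m, air) the tabulated values give
\[
\mathrm{FWHM}_{\mathrm{air}} = | 4.81108 - 4.15792 | = 0.65316\ \mbox{MeV},
\]
which matches the \texttt{fwhm\_air\_mev} entry in the first row; the vacuum column is obtained in the same way.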
\begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder]
\prompt{In}{incolor}{8}{\boxspacing}
\begin{Verbatim}[commandchars=\\\{\}]
\PY{c+c1}{\PYZsh{} observations}
\PY{n}{x} \PY{o}{=} \PY{n}{thickness}
\PY{n}{y\PYZus{}air} \PY{o}{=} \PY{n}{fwhm\PYZus{}air\PYZus{}mev}
\PY{n}{y\PYZus{}vac} \PY{o}{=} \PY{n}{fwhm\PYZus{}vaccum\PYZus{}mev}
\PY{c+c1}{\PYZsh{} estimating coefficients}
\PY{n}{b\PYZus{}air} \PY{o}{=} \PY{n}{estimate\PYZus{}coef}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{n}{y\PYZus{}air}\PY{p}{)}
\PY{n}{b\PYZus{}vac} \PY{o}{=} \PY{n}{estimate\PYZus{}coef}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{n}{y\PYZus{}vac}\PY{p}{)}
\PY{n+nb}{print}\PY{p}{(}
\PY{l+s+sa}{f}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Estimated coefficients (Air): }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ a = }\PY{l+s+si}{\PYZob{}}\PY{n}{b\PYZus{}air}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{l+s+si}{:}\PY{l+s+s2}{.3f}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ b = }\PY{l+s+si}{\PYZob{}}\PY{n}{b\PYZus{}air}\PY{p}{[}\PY{l+m+mi}{1}\PY{p}{]}\PY{l+s+si}{:}\PY{l+s+s2}{.3f}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}}
\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{Estimated coefficients (Vaccum): }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ a = }\PY{l+s+si}{\PYZob{}}\PY{n}{b\PYZus{}vac}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{l+s+si}{:}\PY{l+s+s2}{.3f}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ b = }\PY{l+s+si}{\PYZob{}}\PY{n}{b\PYZus{}vac}\PY{p}{[}\PY{l+m+mi}{1}\PY{p}{]}\PY{l+s+si}{:}\PY{l+s+s2}{.3f}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{\PYZdq{}}
\PY{p}{)}
\PY{c+c1}{\PYZsh{} predicted response vector}
\PY{n}{y\PYZus{}pred\PYZus{}air} \PY{o}{=} \PY{n}{b\PYZus{}air}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]} \PY{o}{+} \PY{n}{b\PYZus{}air}\PY{p}{[}\PY{l+m+mi}{1}\PY{p}{]} \PY{o}{*} \PY{n}{x}
\PY{n}{y\PYZus{}pred\PYZus{}vac} \PY{o}{=} \PY{n}{b\PYZus{}vac}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]} \PY{o}{+} \PY{n}{b\PYZus{}vac}\PY{p}{[}\PY{l+m+mi}{1}\PY{p}{]} \PY{o}{*} \PY{n}{x}
\PY{n}{plt}\PY{o}{.}\PY{n}{style}\PY{o}{.}\PY{n}{use}\PY{p}{(}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{seaborn\PYZhy{}poster}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{figure}\PY{p}{(}\PY{n}{figsize}\PY{o}{=}\PY{p}{(}\PY{l+m+mi}{15}\PY{p}{,} \PY{l+m+mi}{8}\PY{p}{)}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{title}\PY{p}{(}\PY{l+s+sa}{f}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Thickness vs. FWHM curve of Am\PYZhy{}241}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{xlabel}\PY{p}{(}\PY{l+s+sa}{r}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Thickness (\PYZdl{}}\PY{l+s+s2}{\PYZbs{}}\PY{l+s+s2}{mu\PYZdl{}m)}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{ylabel}\PY{p}{(}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{FWHM (MeV)}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{n}{y\PYZus{}pred\PYZus{}air}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{\PYZhy{}}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{label}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{fitted curve for air}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{thickness}\PY{p}{,} \PY{n}{fwhm\PYZus{}air\PYZus{}mev}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{o}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{label}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{air data points}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{n}{y\PYZus{}pred\PYZus{}vac}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{\PYZhy{}}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{label}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{fitted curve for vaccum}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{thickness}\PY{p}{,} \PY{n}{fwhm\PYZus{}vaccum\PYZus{}mev}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{ks}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{label}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{vaccum data points}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{legend}\PY{p}{(}\PY{n}{loc}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{upper left}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{grid}\PY{p}{(}\PY{n}{alpha}\PY{o}{=}\PY{l+m+mf}{0.3}\PY{p}{,} \PY{n}{which}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{major}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{minorticks\PYZus{}on}\PY{p}{(}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{grid}\PY{p}{(}\PY{n}{alpha}\PY{o}{=}\PY{l+m+mf}{0.2}\PY{p}{,} \PY{n}{which}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{minor}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{ls}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{\PYZhy{}\PYZhy{}}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{show}\PY{p}{(}\PY{p}{)}
\end{Verbatim}
\end{tcolorbox}
\begin{Verbatim}[commandchars=\\\{\}]
Estimated coefficients (Air):
a = 0.518
b = 0.088
Estimated coefficients (Vaccum):
a = 0.662
b = 0.095
\end{Verbatim}
\begin{center}
\adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{surface_barrier_files/surface_barrier_14_1.png}
\end{center}
{ \hspace*{\fill} \\}
\hypertarget{linear-regression-table}{%
\subsubsection{Linear regression table}\label{linear-regression-table}}
\hypertarget{energy-vs.-thickness}{%
\paragraph{energy vs.~thickness}\label{energy-vs.-thickness}}
\begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder]
\prompt{In}{incolor}{9}{\boxspacing}
\begin{Verbatim}[commandchars=\\\{\}]
\PY{n}{data\PYZus{}air} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{DataFrame}\PY{p}{(}\PY{p}{)}
\PY{n}{x} \PY{o}{=} \PY{n}{thickness}
\PY{n}{ya} \PY{o}{=} \PY{n}{peak\PYZus{}energy\PYZus{}air}
\PY{n}{xx} \PY{o}{=} \PY{n}{thickness} \PY{o}{*} \PY{n}{thickness}
\PY{n}{xya} \PY{o}{=} \PY{n}{thickness} \PY{o}{*} \PY{n}{ya}
\PY{n}{data\PYZus{}air}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{x}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]} \PY{o}{=} \PY{n}{x}
\PY{n}{data\PYZus{}air}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{y}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]} \PY{o}{=} \PY{n}{ya}
\PY{n}{data\PYZus{}air}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{xx}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]} \PY{o}{=} \PY{n}{xx}
\PY{n}{data\PYZus{}air}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{xy}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]} \PY{o}{=} \PY{n}{xya}
\PY{n+nb}{print}\PY{p}{(}
\PY{l+s+sa}{f}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Air regression: }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+si}{\PYZob{}}\PY{n}{data\PYZus{}air}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}}
\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ sx = }\PY{l+s+si}{\PYZob{}}\PY{n}{np}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{n}{x}\PY{p}{)}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{, }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ sy = }\PY{l+s+si}{\PYZob{}}\PY{n}{np}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{n}{ya}\PY{p}{)}\PY{l+s+si}{:}\PY{l+s+s2}{.5f}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{, }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ sxx= }\PY{l+s+si}{\PYZob{}}\PY{n}{np}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{n}{xx}\PY{p}{)}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ sxy = }\PY{l+s+si}{\PYZob{}}\PY{n}{np}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{n}{xya}\PY{p}{)}\PY{l+s+si}{:}\PY{l+s+s2}{.5f}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{\PYZdq{}}
\PY{p}{)}
\PY{n}{data\PYZus{}vaccum} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{DataFrame}\PY{p}{(}\PY{p}{)}
\PY{n}{yv} \PY{o}{=} \PY{n}{peak\PYZus{}energy\PYZus{}vaccum}
\PY{n}{xyv} \PY{o}{=} \PY{n}{thickness}\PY{o}{*}\PY{n}{yv}
\PY{n}{data\PYZus{}vaccum}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{x}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]} \PY{o}{=} \PY{n}{x}
\PY{n}{data\PYZus{}vaccum}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{y}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]} \PY{o}{=} \PY{n}{yv}
\PY{n}{data\PYZus{}vaccum}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{xx}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]} \PY{o}{=} \PY{n}{xx}
\PY{n}{data\PYZus{}vaccum}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{xy}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]} \PY{o}{=} \PY{n}{xyv}
\PY{n+nb}{print}\PY{p}{(}
\PY{l+s+sa}{f}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+se}{\PYZbs{}n}\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ Vaccum regression: }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+si}{\PYZob{}}\PY{n}{data\PYZus{}vaccum}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}}
\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ sx = }\PY{l+s+si}{\PYZob{}}\PY{n}{np}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{n}{x}\PY{p}{)}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{, }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ sy = }\PY{l+s+si}{\PYZob{}}\PY{n}{np}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{n}{yv}\PY{p}{)}\PY{l+s+si}{:}\PY{l+s+s2}{.5f}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{, }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ sxx= }\PY{l+s+si}{\PYZob{}}\PY{n}{np}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{n}{xx}\PY{p}{)}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ sxy = }\PY{l+s+si}{\PYZob{}}\PY{n}{np}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{n}{xyv}\PY{p}{)}\PY{l+s+si}{:}\PY{l+s+s2}{.5f}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{\PYZdq{}}
\PY{p}{)}
\end{Verbatim}
\end{tcolorbox}
\begin{Verbatim}[commandchars=\\\{\}]
Air regression:
x y xx xy
0 2 4.54981 4 9.09962
1 4 4.06687 16 16.26748
2 8 2.91098 64 23.28780
3 10 2.87139 100 28.71390
4 12 2.19052 144 26.28625
sx = 36,
sy = 16.58957,
sxx= 328
sxy = 103.65505
Vaccum regression:
x y xx xy
0 2 4.87045 4 9.74091
1 4 4.32022 16 17.28087
2 8 3.50872 64 28.06972
3 10 3.22370 100 32.23700
4 12 2.57054 144 30.84649
sx = 36,
sy = 18.49363,
sxx= 328
sxy = 118.17498
\end{Verbatim}
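Substituting the tabulated air sums ($n = 5$, $S_x = 36$, $S_y = 16.58957$, $S_{xx} = 328$, $S_{xy} = 103.65505$) into the least-squares formulas quoted after the \texttt{estimate\_coef} definition reproduces the coefficients printed earlier in the Thickness vs.\ energy section:
\[
b = \frac{5(103.65505) - 36(16.58957)}{5(328) - 36^{2}} = \frac{-78.949}{344} \approx -0.230,
\qquad
a = \frac{16.58957(328) - 36(103.65505)}{344} \approx 4.970,
\]
and the vacuum sums give $a \approx 5.266$ and $b \approx -0.218$ in the same way.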
\hypertarget{fwhm-vs-thickness}{%
\paragraph{fwhm vs thickness}\label{fwhm-vs-thickness}}
\begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder]
\prompt{In}{incolor}{10}{\boxspacing}
\begin{Verbatim}[commandchars=\\\{\}]
\PY{n}{data\PYZus{}air} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{DataFrame}\PY{p}{(}\PY{p}{)}
\PY{n}{x} \PY{o}{=} \PY{n}{thickness}
\PY{n}{yaf} \PY{o}{=} \PY{n}{fwhm\PYZus{}air\PYZus{}mev}
\PY{n}{xx} \PY{o}{=} \PY{n}{thickness} \PY{o}{*} \PY{n}{thickness}
\PY{n}{xyaf} \PY{o}{=} \PY{n}{thickness} \PY{o}{*} \PY{n}{yaf}
\PY{n}{data\PYZus{}air}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{x}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]} \PY{o}{=} \PY{n}{x}
\PY{n}{data\PYZus{}air}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{y}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]} \PY{o}{=} \PY{n}{yaf}
\PY{n}{data\PYZus{}air}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{xx}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]} \PY{o}{=} \PY{n}{xx}
\PY{n}{data\PYZus{}air}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{xy}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]} \PY{o}{=} \PY{n}{xyaf}
\PY{n+nb}{print}\PY{p}{(}
\PY{l+s+sa}{f}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Air regression: }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+si}{\PYZob{}}\PY{n}{data\PYZus{}air}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}}
\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ sx = }\PY{l+s+si}{\PYZob{}}\PY{n}{np}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{n}{x}\PY{p}{)}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{, }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ sy = }\PY{l+s+si}{\PYZob{}}\PY{n}{np}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{n}{yaf}\PY{p}{)}\PY{l+s+si}{:}\PY{l+s+s2}{.5f}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{, }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ sxx= }\PY{l+s+si}{\PYZob{}}\PY{n}{np}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{n}{xx}\PY{p}{)}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ sxy = }\PY{l+s+si}{\PYZob{}}\PY{n}{np}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{n}{xyaf}\PY{p}{)}\PY{l+s+si}{:}\PY{l+s+s2}{.5f}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{\PYZdq{}}
\PY{p}{)}
\PY{n}{data\PYZus{}vaccum} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{DataFrame}\PY{p}{(}\PY{p}{)}
\PY{n}{yvf} \PY{o}{=} \PY{n}{fwhm\PYZus{}vaccum\PYZus{}mev}
\PY{n}{xyvf} \PY{o}{=} \PY{n}{thickness}\PY{o}{*}\PY{n}{yvf}
\PY{n}{data\PYZus{}vaccum}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{x}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]} \PY{o}{=} \PY{n}{x}
\PY{n}{data\PYZus{}vaccum}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{y}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]} \PY{o}{=} \PY{n}{yvf}
\PY{n}{data\PYZus{}vaccum}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{xx}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]} \PY{o}{=} \PY{n}{xx}
\PY{n}{data\PYZus{}vaccum}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{xy}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]} \PY{o}{=} \PY{n}{xyvf}
\PY{n+nb}{print}\PY{p}{(}
\PY{l+s+sa}{f}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+se}{\PYZbs{}n}\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ Vaccum regression: }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+si}{\PYZob{}}\PY{n}{data\PYZus{}vaccum}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}}
\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ sx = }\PY{l+s+si}{\PYZob{}}\PY{n}{np}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{n}{x}\PY{p}{)}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{, }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ sy = }\PY{l+s+si}{\PYZob{}}\PY{n}{np}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{n}{yvf}\PY{p}{)}\PY{l+s+si}{:}\PY{l+s+s2}{.5f}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{, }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ sxx= }\PY{l+s+si}{\PYZob{}}\PY{n}{np}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{n}{xx}\PY{p}{)}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{ sxy = }\PY{l+s+si}{\PYZob{}}\PY{n}{np}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{n}{xyvf}\PY{p}{)}\PY{l+s+si}{:}\PY{l+s+s2}{.5f}\PY{l+s+si}{\PYZcb{}}\PY{l+s+s2}{\PYZdq{}}
\PY{p}{)}
\end{Verbatim}
\end{tcolorbox}
\begin{Verbatim}[commandchars=\\\{\}]
Air regression:
x y xx xy
0 2 0.65316 4 1.30632
1 4 0.92630 16 3.70520
2 8 1.17173 64 9.37383
3 10 1.46070 100 14.60702
4 12 1.53591 144 18.43097
sx = 36,
sy = 5.74780,
sxx= 328
sxy = 47.42333
Vaccum regression:
x y xx xy
0 2 0.88275 4 1.76551
1 4 0.95005 16 3.80020
2 8 1.55571 64 12.44566
3 10 1.57550 100 15.75500
4 12 1.77739 144 21.32862
sx = 36,
sy = 6.74140,
sxx= 328
sxy = 55.09499
\end{Verbatim}
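The same check works for the FWHM fits; with the air sums above ($S_y = 5.74780$, $S_{xy} = 47.42333$),
\[
b = \frac{5(47.42333) - 36(5.74780)}{344} \approx 0.088,
\qquad
a = \frac{5.74780(328) - 36(47.42333)}{344} \approx 0.518,
\]
in agreement with the coefficients printed in the Thickness vs.\ FWHM section.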
% Add a bibliography block to the postdoc
\end{document}
| {
"alphanum_fraction": 0.5639934425,
"avg_line_length": 66.3615384615,
"ext": "tex",
"hexsha": "2b84f89c9d51cc621c9e478835b0349bce310643",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "df434f087677e5e15bc4659cdf17cbf3f4b6a85e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "okarin001/Practical-Analysis",
"max_forks_repo_path": "4th sem practicals/surface barrier detector/surface_barrier.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "df434f087677e5e15bc4659cdf17cbf3f4b6a85e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "okarin001/Practical-Analysis",
"max_issues_repo_path": "4th sem practicals/surface barrier detector/surface_barrier.tex",
"max_line_length": 728,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "df434f087677e5e15bc4659cdf17cbf3f4b6a85e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "okarin001/Practical-Analysis",
"max_stars_repo_path": "4th sem practicals/surface barrier detector/surface_barrier.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 28114,
"size": 60389
} |
\section{Software Updates}
\label{sec:software-updates}
The distinction between protocol parameter and application updates is as
follows: \newline
\noindent \textbf{Protocol parameters}
\begin{itemize}
\item All parameters excluding $\ProtVer$: constants
currently used in ledger calculations performed according to the rules described
in this document
\begin{itemize}
\item[$\circ$] Updated only at epoch boundary
\item[$\circ$] All nodes automatically adopt new values (this mechanism is an explicit
part of the ledger rules, and is included in the epoch boundary transition)
\item[$\circ$] Has no effect on the logic of the ledger transition rules
\end{itemize}
\item Protocol version parameter ($\ProtVer$): a special value which
corresponds to a specific version of the \textit{ledger rules}
\begin{itemize}
\item[$\circ$] I.e. if $\var{pv}$ changes, this document may have to be updated with
the new rules and types if there is a change in the logic
\item[$\circ$] E.g. the change may be that the new rules now allow \textit{all} nodes
to vote on update proposals
\item[$\circ$] Whether the $\var{pv}$ must change with any change of protocol
parameters when the \textit{rules do not change} is to be decided
\item[$\circ$] Mechanism for updating is inherited from the general protocol
parameter update and voting rules
\item[$\circ$] If there is a change in transition rules, nodes must have
software installed that can implement these rules at the epoch boundary
when the protocol parameter adoption occurs
\item[$\circ$] Switching to using these new rules is mandatory in the sense that
if the nodes do not have the applications implementing them, this
will prevent a user from reading and
writing to the ledger
\end{itemize}
\end{itemize}
\textbf{Applications} The version of the software the nodes run,
as well as the related system tags and metadata
\begin{itemize}
\item[$\circ$] We cannot force the users to actually upgrade their software
\item[$\circ$] Any application version that is capable of implementing the protocol version
currently on the ledger can be used by a node
\item[$\circ$] Users can update applications as soon as an update is agreed upon, and should
do so before their current application becomes incompatible with the
current protocol version (due to the update), however
\item[$\circ$] A voted-on application version update has a recommended adoption date,
which applications will automatically follow
\end{itemize}
Applications must sometimes support \textit{several different versions}
of ledger rules in order to accommodate the timely switch of the $\ProtVer$ at the
epoch boundary. Usually these are the currently accepted protocol version and the
next version they are ready to upgrade to (i.e.\ one that their application
versions can implement).
The newest protocol version a node is ready to use is included in the block
header of the blocks it produces, see Section \ref{sec:defs-blocks}. This is either
\begin{itemize}
\item the current version (if there is no protocol version update pending or the node
has not updated to an upcoming software version capable of implementing a
newer protocol version), or
\item the next protocol version (if the node has updated its software, but the
current protocol version on the ledger has not been updated yet).
\end{itemize}
So, users have some agency in the process of adoption of
new protocol versions. They may refuse to download and install updates.
Since software updates cannot be \textit{forced} on the users, if the majority of
users do not perform an update that allows switching to the next $\ProtVer$,
the switch cannot happen.
There is no data in blocks or transactions that says what application
versions a user has, or what protocol version they are using (this always has to
be the version recorded on the ledger).
Having the wrong version of an application
may potentially become problematic (when it is not able to follow the current
ledger rules dictated by $\ProtVer$), however, the update mechanism implemented
in the node software should
ensure this does not happen often.
The process of upgrading the system to a new version consists of the following steps:
\begin{enumerate}
\item New software is ready for downloading.
\item The core nodes propose changing
the application version to this
 new software. The voting on and proposal of application version updates are discussed
in Section \ref{sec:update}.
\item If there is consensus, this application version becomes the current application version,
see Section \ref{sec:ledger-trans}.
\item All nodes see a new application version on the ledger and update their
software.
\end{enumerate}
Note that if there is a \textit{new protocol version} implemented by this new
software, the core nodes monitor how many nodes are ready to use the new
protocol version via the block headers.
Once enough nodes are ready for the new protocol version, this
may now be updated as well (by the mechanism described in
Section \ref{sec:update}).
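To make the readiness check above concrete, the following is a minimal sketch
(in Python, which is not the implementation language of the ledger) of how core
nodes might tally the protocol versions announced in recent block headers. The
quorum threshold and the number of recent headers inspected are illustrative
assumptions and are not prescribed by this specification.
\begin{verbatim}
from collections import Counter

def ready_for_upgrade(headers, next_pv, quorum=0.80):
    # Count the newest protocol version announced in each block header.
    announced = Counter(h.protocol_version for h in headers)
    total = sum(announced.values())
    if total == 0:
        return False
    # Blocks announcing next_pv (or newer) signal that their producer has
    # installed software capable of implementing next_pv.
    ready = sum(n for pv, n in announced.items() if pv >= next_pv)
    return ready / total >= quorum
\end{verbatim}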
| {
"alphanum_fraction": 0.7897384306,
"avg_line_length": 48.2524271845,
"ext": "tex",
"hexsha": "1b302b13af71b23763e6009006259da65b3a89fe",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "fa3a3d797e2db870cefc4f9ba602136c27d3edff",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "mhuesch/cardano-ledger-specs",
"max_forks_repo_path": "shelley/chain-and-ledger/formal-spec/software-updates.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "fa3a3d797e2db870cefc4f9ba602136c27d3edff",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "mhuesch/cardano-ledger-specs",
"max_issues_repo_path": "shelley/chain-and-ledger/formal-spec/software-updates.tex",
"max_line_length": 96,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "fa3a3d797e2db870cefc4f9ba602136c27d3edff",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "mhuesch/cardano-ledger-specs",
"max_stars_repo_path": "shelley/chain-and-ledger/formal-spec/software-updates.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1122,
"size": 4970
} |
The key intuition behind Roots is that as an intrinsic PaaS service
it has visibility into all the activities that occur in various layers of the cloud,
including all invocations of the PaaS kernel services made by the applications.
Therefore it can automatically collect
data regarding events that are related to application request processing.
The cloud platform can then analyze the collected data offline (but in near realtime) to detect
performance anomalies and identify root causes.
We argue that data collection can be implemented efficiently in the cloud platform so as to not
introduce a significant overhead to deployed applications.
Moreover, data collection can always be active in the cloud, thus relieving application developers
from having to instrument their code or set up external monitoring.
The data analysis can benefit from the vast amount of compute
resources available in the cloud platform. The offline processing ensures that request
latency is not impacted by monitoring, and the near realtime analysis ensures that developers
and other interested parties are notified of performance anomalies urgently.
\subsubsection{Data Collection and Correlation}
There are two issues that need to be addressed when designing a monitoring framework for
a system as complex as a PaaS cloud.
\begin{enumerate}
\item Collecting data from multiple different layers.
\item Correlating data collected from different layers.
\end{enumerate}
Each individual layer of the cloud platform is only able to collect data regarding the
state changes that are local to it. A layer cannot monitor state changes
in other layers due to the level of encapsulation provided by each layer. However,
processing an application request involves cooperation of multiple
layers. For example, an application request gets routed through the front-end server before
it reaches the application server. The application server may then invoke one or more PaaS kernel
services. Depending on how the kernel services are implemented,
even more layers may get involved in processing the request.
In order to facilitate systemwide monitoring and
bottleneck identification, we should be able to gather data from all the different layers involved
in processing a request. Then we should be able to correlate the collected data, and tie the events that
are related to the same request together.
To facilitate these requirements, we start by augmenting the front-end server of the cloud platform.
Specifically we get the front-end server to tag all incoming application requests with unique identifiers.
This request identifier can be attached to an HTTP request as a header, which is visible to all
internal components of the PaaS cloud. Then we configure the data collecting agents within the
cloud platform to record the request identifiers along with any events they capture.
This way we maintain the relationship between application requests, and the resulting
local state changes in different layers of the cloud, without breaking the existing level
of abstraction in the cloud architecture. This approach is also scalable, since the events are
recorded in a distributed manner without having to maintain any state at the data collecting agents.
The data analysis components can later
aggregate the recorded events by request identifier to efficiently group the related events
in an on-demand fashion.
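To make the mechanism concrete, the following sketch (in Python; the header
name and all function names are ours, not part of any existing Roots code)
shows a front-end tagging requests with unique identifiers, agents recording
events that carry the identifier, and an analysis step grouping the events
back together by request:
\begin{verbatim}
import uuid
from collections import defaultdict

REQUEST_ID_HEADER = "X-Roots-Request-Id"   # hypothetical header name

def tag_request(http_headers):
    # Front-end server: attach a unique identifier to the incoming request.
    http_headers[REQUEST_ID_HEADER] = str(uuid.uuid4())
    return http_headers[REQUEST_ID_HEADER]

def record_event(event_log, request_id, layer, payload):
    # Data collecting agent: record a local event together with the
    # request identifier; no shared state between agents is required.
    event_log.append({"request": request_id, "layer": layer, **payload})

def events_by_request(event_log):
    # Offline analysis: group the recorded events by request identifier.
    grouped = defaultdict(list)
    for event in event_log:
        grouped[event["request"]].append(event)
    return grouped
\end{verbatim}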
\begin{figure}
\centering
\includegraphics[scale=0.5]{apm_architecture}
\caption{Roots APM architecture.}
\label{fig:apm_architecture}
\end{figure}
Figure~\ref{fig:apm_architecture} illustrates the high-level architecture of Roots, and how
it fits into the PaaS stack. APM components are shown in grey, with their interactions indicated
by the black lines. The small grey boxes attached to the PaaS components represent the
agents used to instrument the cloud platform for data collection purposes.
In the diagram a user request is getting tagged with the identifier value
$R$ at the front-end server. This identifier is passed down to the lower layers of the cloud
along with the request. Events that occur in the lower layers as a result of processing this request
are recorded with the request identifier $R$, so we can correlate them later. For example, in the
data analysis component we can run a filter query to select all the events related to a particular
request (as shown in the pseudo query in the diagram). Or we can run a ``group by''
query to select all events, and aggregate them by the request identifier.
Figure~\ref{fig:apm_architecture} also shows Roots collecting data from all layers in the
PaaS stack (i.e. full stack monitoring).
From the front-end server layer we gather information related to incoming application
requests. A big part of this is scraping the HTTP server access logs, which indicate request timestamps,
source and destination addressing information, response time (latency) and other HTTP message
parameters. This information is readily available for harvesting in most technologies used as front-end
servers (e.g. Apache HTTPD, Nginx). Additionally we may also collect information pertaining to active
connections, invalid access attempts and other errors.
From the application server layer we collect basic application logs as well as any other
metrics that can be easily collected from the application runtime. This may include some process level
metrics indicating the resource usage of the individual application instances. Additionally, Roots
employs a set of per-application benchmarking processes that periodically probes
different applications
to measure their performance. These are lightweight, stateless processes managed by the Roots framework.
Data collected by these processes will also be sent to data storage component, and will be available
for analysis as per-application time series data.
At the PaaS kernel layer we collect information regarding all kernel invocations
made by the applications. This requires intercepting the PaaS kernel invocations
at runtime. This must be done carefully so as not to introduce a noticeable
overhead to the application execution. For each PaaS kernel invocation, we can capture the
following parameters.
\begin{itemize}
\item Source application making the kernel invocation
\item Timestamp
\item A sequence number indicating the order of PaaS kernel invocations within an application request
\item Target kernel service and operation
\item Execution time of the invocation
\item Request size, hash and other parameters
\end{itemize}
Collecting these PaaS kernel invocation details enables tracing the execution of application
requests, without the need for instrumenting application code, which we believe is a feature
unique to Roots.
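For illustration, the per-invocation record could be represented as follows (a
sketch in Python; the field names are ours, chosen to mirror the list above):
\begin{verbatim}
from dataclasses import dataclass

@dataclass
class KernelInvocationEvent:
    # One record per PaaS kernel invocation, emitted asynchronously
    # by the interception agent.
    application: str        # source application making the call
    request_id: str         # identifier assigned at the front-end
    timestamp: float        # when the invocation was made
    sequence_number: int    # order of the call within the request
    service: str            # target kernel service
    operation: str          # operation invoked on that service
    execution_time_ms: float
    request_size_bytes: int
    request_hash: str
\end{verbatim}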
Finally, at the lowest infrastructure level, we can collect information related to virtual machines, containers
and their resource usage. We can also gather metrics on network usage by individual components which
might be useful in a number of traffic engineering use cases. Where appropriate we can also scrape
hypervisor and container manager logs to get an idea of how resources are allocated and released over
time.
To avoid introducing delays to the application request processing flow, we implement
all Roots data collecting agents as asynchronous tasks. That is, none of them would
suspend application request processing to report data to the data storage components.
We make sure that all expensive I/O tasks related to data collection and storage are
executed out of the request processing flow.
In particular, all data is collected into log files or memory buffers that are local to the components being
monitored. This locally collected (or buffered) data is periodically sent
to the data storage components of Roots using separate background tasks and batch communication
operations. Also special care is taken to isolate the activities in the cloud from potential
failures in the Roots data collection or storage components.
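The buffering pattern can be sketched as follows (Python; the flush interval,
batch size and the \texttt{write\_batch} storage call are illustrative
assumptions): events are appended to a local in-memory buffer on the request
path, and a background thread periodically ships them to the data store in
batches, isolating the application from storage failures.
\begin{verbatim}
import queue
import threading
import time

class BufferingAgent:
    def __init__(self, store, flush_interval=5.0, batch_size=500):
        self._store = store              # Roots data storage client
        self._buffer = queue.Queue()     # local, in-memory event buffer
        worker = threading.Thread(
            target=self._flush_loop, args=(flush_interval, batch_size),
            daemon=True)
        worker.start()

    def record(self, event):
        # Called on the request path: an in-memory append only, no I/O.
        self._buffer.put(event)

    def _flush_loop(self, interval, batch_size):
        while True:
            time.sleep(interval)
            batch = []
            while not self._buffer.empty() and len(batch) < batch_size:
                batch.append(self._buffer.get())
            if batch:
                try:
                    # Batch I/O, entirely off the request processing flow.
                    self._store.write_batch(batch)
                except Exception:
                    pass   # never let storage failures affect the application
\end{verbatim}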
\subsubsection{Data Storage}
The Roots data storage is a database that supports persistently storing monitoring data, and running
queries on them.
Cloud providers have the freedom to implement this component in any way they see fit, as long
as it scales to the number of applications deployed in the cloud platform. Most data retrieval queries executed
by Roots use application and time intervals as indices. Therefore a database that can index monitoring
data by application, and then organize records by timestamp will greatly improve the query performance.
It is also acceptable to remove old monitoring data to make room for more recent events, since Roots
is performing anomaly detection using the most recent data in near realtime.
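Purely as an illustration of the access pattern (the concrete database
technology is left to the cloud provider), a toy in-memory stand-in for the
data storage, indexed by application and organized by timestamp, could look
like this:
\begin{verbatim}
from collections import defaultdict

class MonitoringStore:
    def __init__(self):
        # application -> list of (timestamp, event), kept sorted by time
        self._by_app = defaultdict(list)

    def write_batch(self, events):
        for e in events:
            self._by_app[e["application"]].append((e["timestamp"], e))
        for series in self._by_app.values():
            series.sort(key=lambda pair: pair[0])

    def query(self, application, start, end):
        # Time-range scan within a single application's series.
        return [e for ts, e in self._by_app[application] if start <= ts <= end]
\end{verbatim}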
\subsubsection{Data Analysis}
Roots data analysis component uses two basic abstractions: \textit{anomaly detectors}
and \textit{anomaly handlers}.
Anomaly detectors are processes that periodically analyze the data collected for
each deployed application. Roots supports multiple detector implementations, where each implementation
uses a different statistical method to look for performance anomalies. Detectors are configured
per-application, making it possible for different applications to use different anomaly
detectors. Roots also supports multiple concurrent anomaly detectors on the same application, which can be used
to evaluate the efficiency of different detection strategies for any given application. Each
anomaly detector has an execution schedule (e.g. run every 60 seconds), and a sliding window
(e.g. from 10 minutes ago to now)
associated with it. The boundaries of the window determine the time range
of the data processed by the detector at any round of execution. The window is updated
after each round of execution.
%Our anomaly detector abstraction is general
%enough to support detecting a wide range of anomalies. However, in our work we
%mainly focus on anomaly detectors that check for violations of performance SLOs.
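The detector abstraction can be summarized with the following sketch (Python;
the storage query API and all names are ours, and the concrete statistical
check is deliberately left abstract):
\begin{verbatim}
import time

class AnomalyDetector:
    def __init__(self, application, store, period=60.0, window=600.0):
        self.application = application
        self.store = store        # Roots data storage client (assumed API)
        self.period = period      # execution schedule, e.g. every 60 seconds
        self.window = window      # sliding window length, e.g. 10 minutes

    def run_forever(self, handlers):
        while True:
            end = time.time()
            start = end - self.window            # window slides every round
            data = self.store.query(self.application, start, end)
            anomaly = self.detect(data)
            if anomaly is not None:
                for handler in handlers:         # handlers are described next
                    handler.on_event(anomaly)
            time.sleep(self.period)

    def detect(self, data):
        # Concrete implementations plug in a statistical method here.
        raise NotImplementedError
\end{verbatim}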
When an anomaly detector finds an anomaly in application performance, it sends an event
to a collection of anomaly handlers. The event encapsulates a unique anomaly identifier,
timestamp, application identifier and the source detector's sliding window that corresponds to the
anomaly. Anomaly handlers are configured globally (i.e. each handler
receives events from all detectors), but each handler can be programmed to handle only
certain types of events. Furthermore, they can fire their own events, which are also delivered to
all the listening anomaly handlers. Similar to detectors, Roots supports multiple anomaly handler
implementations -- one for logging anomalies, one for sending alert emails, one
for updating a dashboard etc. Additionally, Roots provides two special anomaly handler
implementations: a workload change analyzer, and a bottleneck identifier.
The communication between detectors and handlers can be efficiently implemented
using shared memory, as explained in Section~\ref{sec:process_mgt}.
The ability of anomaly handlers to fire their own events, coupled with their support
for responding to a filtered subset of incoming events enables constructing
elaborate event flows with sophisticated logic. For example, the workload
change analyzer can run some analysis upon receiving an anomaly event
from any anomaly detector. If an anomaly cannot be associated with a workload
change, it can fire a different type of event. The bottleneck identifier can
be programmed to only execute its analysis upon receiving this second type of event.
This way we perform the workload change analysis first, and perform the
systemwide bottleneck identification only when it is required to do so.
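A sketch of this chaining (in Python; the event type names and the
change-detection test are hypothetical) might look as follows:
\begin{verbatim}
class WorkloadChangeAnalyzer:
    def __init__(self, bus):
        self.bus = bus                       # pod-local shared-memory bus

    def on_event(self, event):
        if event["type"] != "anomaly":       # respond only to anomaly events
            return
        if not self.workload_changed(event):
            # Not explained by a workload change: delegate to the
            # bottleneck identifier by firing a different event type.
            self.bus.publish({"type": "bottleneck_check", "anomaly": event})

    def workload_changed(self, event):
        return False   # placeholder for a change test on the request rate

class BottleneckIdentifier:
    def on_event(self, event):
        if event["type"] != "bottleneck_check":   # filtered subset of events
            return
        self.identify_bottleneck(event["anomaly"])

    def identify_bottleneck(self, anomaly):
        pass   # placeholder: analyze per-request kernel invocation data
\end{verbatim}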
Both the anomaly detectors and anomaly handlers work with fixed-size sliding windows.
They can discard any old data as the sliding window moves along the time line.
Therefore the amount of state information these entities must keep in memory has
a strict upper bound.
The extensibility of Roots is primarily achieved through the abstractions of anomaly
detectors and handlers. Roots makes it simple to implement new detectors and handlers,
and plug them into the system. Both the detectors and the handlers are executed
as lightweight processes that do not interfere with the rest of the processes in
the cloud platform. Failures in detectors and handlers have no impact
on the cloud platform or the deployed applications.
\subsubsection{Roots Process Management}
\label{sec:process_mgt}
\begin{figure}
\centering
\includegraphics[scale=0.45]{roots_pod}
\caption{Anatomy of a Roots pod. The diagram shows 2 application benchmarking processes (B),
3 anomaly detectors (D), and 2 handlers (H). Processes communicate via a shared
memory communication bus local to the pod.}
\label{fig:roots_pod}
\end{figure}
Most data collection activities in Roots can be treated as passive -- i.e. they
happen automatically as the applications receive and process requests in the cloud
platform. They do not require explicit scheduling or management. In contrast,
application benchmarking and data analysis are active processes that require
explicit scheduling and management. This is achieved by grouping benchmarking
and data analysis processes into units called Roots pods.
Each Roots pod is responsible for starting and maintaining a preconfigured set of
benchmarkers and data analysis processes (i.e. anomaly detectors and handlers).
Each of these processes is lightweight, so a large number of them can be packed
into a single pod. Pods are self-contained entities, and there is no inter-communication
between pods. Processes in a pod can efficiently communicate with each other
using shared memory, and call out to the central Roots data storage to retrieve
collected performance data for analysis. This enables starting and stopping
Roots pods with minimal impact on the overall monitoring system. Furthermore, pods
can be replicated for high availability, and application load can be distributed
among multiple pods for scalability.
Figure~\ref{fig:roots_pod} illustrates a Roots pod monitoring two applications.
It consists of two benchmarking processes, three anomaly detectors and
two anomaly handlers. The anomaly detectors and handlers communicate
using an internal shared memory communication bus, so that events triggered by one anomaly
detector flow into all handlers.
To automate the process of managing pods, they can be tied into the core
process management framework of the PaaS cloud. That way whenever the cloud
platform initializes, a collection of pods can be started automatically.
The application deployment process of the PaaS cloud can be augmented
to register each new application with one of the available pods, so that the
benchmarkers and anomaly detectors can start running on the application.
Moreover, pods can be moved around or restarted as needed in response
to errors and autoscaling events that occur in the cloud platform.
| {
"alphanum_fraction": 0.8228449141,
"avg_line_length": 64.7316017316,
"ext": "tex",
"hexsha": "129ebc638877225e661a05006e068f4e842acbd3",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-05-25T02:59:15.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-05-25T02:59:15.000Z",
"max_forks_repo_head_hexsha": "d58fe64bb867ef58af19c1d84a5e1ec68ecddd3d",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "UCSB-CS-RACELab/eager-appscale",
"max_forks_repo_path": "Eager/paper/www17/architecture.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d58fe64bb867ef58af19c1d84a5e1ec68ecddd3d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "UCSB-CS-RACELab/eager-appscale",
"max_issues_repo_path": "Eager/paper/www17/architecture.tex",
"max_line_length": 111,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "d58fe64bb867ef58af19c1d84a5e1ec68ecddd3d",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "UCSB-CS-RACELab/eager-appscale",
"max_stars_repo_path": "Eager/paper/www17/architecture.tex",
"max_stars_repo_stars_event_max_datetime": "2018-07-16T18:20:23.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-06-12T01:18:49.000Z",
"num_tokens": 2879,
"size": 14953
} |
\section{Sort}
| {
"alphanum_fraction": 0.6875,
"avg_line_length": 5.3333333333,
"ext": "tex",
"hexsha": "a622c44d19966133f57b30bd4b0b4b614f2a2465",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "adamdboult/nodeHomePage",
"max_forks_repo_path": "src/pug/theory/geometry/manifoldsRiemann/03-00-Sort.tex",
"max_issues_count": 6,
"max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "adamdboult/nodeHomePage",
"max_issues_repo_path": "src/pug/theory/geometry/manifoldsRiemann/03-00-Sort.tex",
"max_line_length": 14,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "adamdboult/nodeHomePage",
"max_stars_repo_path": "src/pug/theory/geometry/manifoldsRiemann/03-00-Sort.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 6,
"size": 16
} |
\documentclass[a4paper]{article}
\usepackage{brent_cv}
\usepackage{pifont}
\renewcommand{\refname}{Publications \hrulefill}
\makeatletter
\newcommand \brentfill {
\tiny
\leavevmode \cleaders
\hb@xt@ .5em{\hss \textperiodcentered \hss }\hfill \kern \z@
\normalsize
}
\makeatother
\pagenumbering{gobble}
\newcommand{\isp}{:$\ $}
\newcommand{\bbull}{\ding{118}}
\begin{document}
\noindent
\begin{minipage}[b][1cm]{.7\textwidth}
\Huge\textbf{Brent Moran} \\
\normalsize\emph{Data Engineering \& Analytics}
\end{minipage}
\begin{minipage}[b][1cm]{.4\textwidth}
\raggedleft
\ttfamily
\href{mailto:[email protected]}{[email protected]} \\
\href{https://github.com/mathemancer}{github.com/mathemancer}
\end{minipage}
\subsection*{Relevant Work Experience \hrulefill}
\begin{itemize}
\item[\bbull] \textbf{Senior Data Engineer}
\emph{Center of Complex Interventions}, Remote \brentfill{} 2020-now\\
Researching innovation in the web search space and other data related
projects.
\item[\bbull] \textbf{Senior Data Engineer} \emph{Creative Commons},
Remote \brentfill{} 2019-2020\\
Development and maintenance of data pipelines to support CC Search, a media
search engine. Research into impact of organizations using CC licenses.
Maintainer of CC Catalog project. Lead internships related to all of the
above.
\item[\bbull] \textbf{Full Stack Developer} \emph{Metronom GmbH},
Berlin \brentfill{} 2018-19\\
Implementation and maintenance of an internal web app for professional
users. Development and improvement of pricing algorithms. Implementation of
data pipeline in GCP.
\item[\bbull] \textbf{Big Data Engineer \& Analyst} \emph{Haensel AMS
GmbH}, Berlin \brentfill{} 2017-18\\
Data analysis, Python development, and development on the AWS cloud.
Set up and tested different algorithms.
\end{itemize}
\subsection*{Education \hrulefill}
\begin{itemize}
\item[\bbull] \textbf{Master of Science} \emph{Freie
Universit\"at Berlin} \brentfill{} 2018\\
Thesis topic: Polynomial bounds on grid-minor theorem
\item[\bbull] \textbf{Bachelor of Science, Summa Cum Laude} \emph{University
of Colorado, Denver} \brentfill{} 2015\\
Major: Mathematics \\
Minor: Economics
\item[\bbull] \emph{Truman State University}, Kirksville,
Missouri \brentfill{} 2003-06\\
Studied music composition and analysis
\end{itemize}
\subsection*{Interesting Projects \hrulefill}
\begin{itemize}
\item[\bbull] \textbf{Linked Commons Graph Analysis} \brentfill{} 2020-now\\
Leading an internship to determine impact of CC licenses as well as
organizations which use them from the
\href{http://dataviz.creativecommons.engineering/}{Linked Commons} graph
data set.
\item[\bbull] \textbf{CC Catalog}
\href{https://github.com/creativecommons/cccatalog}{(Click for Github repo)}
\brentfill{} 2019-now\\
Maintaining the CC Catalog project to gather and index metadata about
hundreds of millions of images from 3rd party APIs as well as Common Crawl.
The metadata is then transformed, cleaned, and loaded into a PostgreSQL DB
for use in CC Search.
\item[\bbull] \textbf{KVI Recommendations on GCP} \brentfill{} 2019\\
Extended a data processing job to choose appropriate Key Value Items for
special competitor-based pricing strategies. Migrated processing job from
internal cloud solution to GCP.
\item[\bbull] \textbf{Dynamic content via AI} \brentfill{} 2018\\
Designed, implemented, and deployed to production a serverless, AI-driven API
allowing a client website to provide dynamic content to a user based on
that user's past behavior.
\item[\bbull] \textbf{ETL Pipeline on AWS} \brentfill{} 2017\\
Participated in the design and implementation of a serverless ETL
(Extract, Transform, and Load) pipeline composed of AWS Lambda
functions and Athena Queries (started by Lambda functions in most
cases), controlled and sequenced by a finite state machine (AWS Step
Function).
\item[\bbull] \textbf{Social Dynamics Simulation (2015):}
Designed and implemented a simulation of turnover (churn) present in
a fictional company, for the purposes of analyzing the effect
different hiring/promotion/firing policies have on employee outcomes
in a hierarchical corporate setting.
\item[\bbull] \textbf{Network influence analysis (2014):}
Designed and implemented a web crawling program in order to generate
citation networks from data on MathSciNet. Analyzed these networks in order
to measure the academic influence of various mathematics papers.
\item[\bbull] \textbf{Cellular automata (2014):}
Designed and implemented a simulation of world urbanization consisting of a
cellular automaton underlying an agent-based simulation.
\end{itemize}
\subsection*{Technical Skills \hrulefill}
\begin{itemize}
\item[\bbull] \textbf{Programming:} Python, PySpark, Golang,
JavaScript, Java (Spring Boot), Scala (Play)
\item[\bbull] \textbf{Querying:} PostgreSQL, MySQL, AWS Athena, Google
BigQuery
\item[\bbull] \textbf{Cloud Providers:} Amazon Web Services, Google
Cloud Platform
\item[\bbull] \textbf{Operating Systems:} Linux, MacOS
\item[\bbull] \textbf{Other:} bash, Git, Apache Airflow, Docker, Kubernetes,
Redis, Jenkins, \LaTeX
\end{itemize}
\subsection*{Conference Talks \hrulefill}
\begin{itemize}
\item[\bbull] \textbf{Joint Mathematics Meetings:}
San Antonio, Texas \brentfill{} 2015 \\
\emph{Ramsey-Minimal Saturation Number for Families of Stars}
\item[\bbull] \textbf{MAA Mathfest:}
Portland, Oregon \brentfill{} 2014 \\
\emph{The 1-Relaxed Modular Edge-sum Labeling Game}
\item[\bbull] \textbf{PPRUMC:}
Colorado Springs, Colorado \brentfill{} 2014 \\
\emph{Ramsey-Minimal Saturation Number for Families of Stars}
\end{itemize}
\begingroup
\renewcommand{\section}[2]{\subsection#1{#2}}%
\nocite{brandt:local,butler:forest}
\bibliographystyle{plain}
\bibliography{cvbib.bib}
\endgroup
\subsection*{Other Research Experience \hrulefill}
\begin{itemize}
\item[\bbull] \textbf{Willamette Valley REU-RET Consortium for
Mathematics Research} \\
\emph{1-Relaxed Modular Edge-sum Labeling Game Number}
Supervised by Charles Dunn and Jennifer Nordstrom of Linfield
College, during this REU in competitive graph coloring, we developed
a new graph labeling scheme based on modular arithmetic, and proved
a number of results regarding our scheme.
\end{itemize}
\end{document}
| {
"alphanum_fraction": 0.7400212475,
"avg_line_length": 41.4402515723,
"ext": "tex",
"hexsha": "0efdd20398252a030c16674ceb8b821013354251",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "be9ee25d72aa5efc40848e1f1b2d0966698b8712",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mathemancer/mathemancer.github.io",
"max_forks_repo_path": "resume/brent_resume.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "be9ee25d72aa5efc40848e1f1b2d0966698b8712",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mathemancer/mathemancer.github.io",
"max_issues_repo_path": "resume/brent_resume.tex",
"max_line_length": 81,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "be9ee25d72aa5efc40848e1f1b2d0966698b8712",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mathemancer/mathemancer.github.io",
"max_stars_repo_path": "resume/brent_resume.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1851,
"size": 6589
} |
This chapter presents some final thoughts and reflections on the results
and summarizes the previously discussed conclusions. A summary of my
proposed solution for testing a web application is also presented.\\
\section{General conclusions}
\begin{itemize}
\item Working with the chosen testing frameworks (RSpec and Jasmine)
has worked very well for testing the GOLI application in particular. I
believe that it would also work very well for testing most similar web
applications.\\
\item The experience of using test-driven development as well as some
ideas originating from behavior-driven development has been pleasant.
While this is a subjective opinion, I believe that this might be the
case for many other software developers and for other projects as well.
I have also found that the newly implemented functionality is well
tested and that its test efficiency is high, which may be another
benefit from using these methodologies. Writing tests using a
ubiquitous language was however not beneficial for this particular
application.\\
\item The combination of many integration- and unit tests complemented
by a few browser tests was successful for this project. It might however
be hard to determine the best level of testing for certain
functionality.\\
\item The level of testing and the combination of different kinds of
tests basically depends on the application. It is not possible to say
that writing higher-level unit or integration tests is generally worse
than writing lower-level unit tests, nor the contrary. Using browser
tests as the primary testing method is probably not the best solution
for most projects, however.\\
\item A test suite can be sped up significantly by using factories to
create data before each test rather than manually creating data for all
tests when the test suite is initialized. While tests using the database
are still not in the same order of magnitude of speed as low-level unit tests,
they are still fast enough to be usable with test-driven
development methodologies.\\
\item Using test efficiency metrics such as test coverage is useful
for finding parts of the code that lack testing, in order to make the tests
better. I believe that using statement test coverage may work well in
practice, even if branch coverage is more helpful and returns fairer
coverage percentages. The increased test efficiency could possibly lead
to finding more software defects due to more efficient tests, but I have
not found any defects by using test efficiency metrics in this
project.\\
\end{itemize}
\newpage
\section{Suggested solution for testing a web application}
For a web application with at least some amount of client-side and
server-side code, I would recommend unit testing for the client as well
as the server. In addition, system-level browser tests should be used.
Most of the written tests should be unit and integration tests, but the
exact proportions depend on the application.\\
I would recommend applications that use Javascript or CoffeeScript on
the client side to use the Jasmine testing framework together with the
Karma test runner. For testing the client-side in Rails projects,
Teaspoon could be an alternative to Karma, which is discussed in
\fref{sec:js_test}.\\
Server-side code written in Ruby on Rails could be tested using RSpec, with
factory\_girl for generating test data. Rails controllers can preferably
be tested using higher-level tests, while model instance methods often
can be tested using lower-level unit tests. This is discussed more
thoroughly in \fref{sec:ruby_test}.\\
Selenium is highly useful for system-level tests. I would however
recommend using a higher-level framework such as Capybara rather than
using Selenium directly. The page object pattern should also be used.
SitePrism is useful for this purpose. These frameworks are discussed
further in \fref{sec:choices_browser}.\\
| {
"alphanum_fraction": 0.8113402062,
"avg_line_length": 47.9012345679,
"ext": "tex",
"hexsha": "9772690897db5214eb214086c39cc6bd0a250735",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0c830a8590a95a95d546616331d6784b78149666",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "nip3o/master-thesis",
"max_forks_repo_path": "rapporter/final-report/conclusions/conclusions.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "0c830a8590a95a95d546616331d6784b78149666",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "nip3o/master-thesis",
"max_issues_repo_path": "rapporter/final-report/conclusions/conclusions.tex",
"max_line_length": 72,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "0c830a8590a95a95d546616331d6784b78149666",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "nip3o/master-thesis",
"max_stars_repo_path": "rapporter/final-report/conclusions/conclusions.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 799,
"size": 3880
} |
% Copyright 2019 by Till Tantau
%
% This file may be distributed and/or modified
%
% 1. under the LaTeX Project Public License and/or
% 2. under the GNU Free Documentation License.
%
% See the file doc/generic/pgf/licenses/LICENSE for more details.
\section{Polar Axes}
\label{section-dv-polar}
\subsection{Overview}
\begin{tikzlibrary}{datavisualization.polar}
    This library contains keys that allow you to create plots in which a polar
    axis system is used.
\end{tikzlibrary}
In a \emph{polar axis system} two attributes are visualized by displacing a
data point as follows: One attribute is used to compute an angle (a
direction) while a second attribute is used as a radius (a distance). The angle
can be measured in degrees, radians, or can be scaled arbitrarily.
%
\begin{codeexample}[
width=8.5cm,
preamble={\usetikzlibrary{
datavisualization.formats.functions,
datavisualization.polar,
}},
]
\tikz \datavisualization [
scientific polar axes={0 to pi, clean},
all axes=grid,
style sheet=vary hue,
legend=below
]
[visualize as smooth line=sin,
sin={label in legend={text=$1+\sin \alpha$}}]
data [format=function] {
var angle : interval [0:pi];
func radius = sin(\value{angle}r) + 1;
}
[visualize as smooth line=cos,
cos={label in legend={text=$1+\cos\alpha$}}]
data [format=function] {
var angle : interval [0:pi];
func radius = cos(\value{angle}r) + 1;
};
\end{codeexample}
Most of the time, in order to create a polar axis system, you will just use the
|scientific polar axes| key, which takes a number of options that allow you to
configure the axis system in greater detail. This key is documented in
Section~\ref{section-dv-sci-polar-axes}. Internally, this key uses more low
level keys which are documented in the en suite sections.
It is worthwhile to note that the axes of a polar axis system are, still,
normal axes of the data visualization system. In particular, all the
configurations possible for, say, Cartesian axes also apply to the ``angle
axis'' and the ``radius axis'' of a polar axis system. For instance, you can
could make both axes logarithmic or style their ticks:
%
\begin{codeexample}[preamble={\usetikzlibrary{
datavisualization.formats.functions,
datavisualization.polar,
}}]
\tikz[baseline] \datavisualization [
scientific axes={clean},
x axis={attribute=angle, ticks={minor steps between steps=4}},
y axis={attribute=radius, ticks={some, style=red!80!black}},
all axes=grid,
visualize as smooth line=sin]
data [format=function] {
var t : interval [-3:3];
func angle = exp(\value t);
func radius = \value{t}*\value{t};
};
\qquad
\tikz[baseline] \datavisualization [
scientific polar axes={right half clockwise, clean},
angle axis={logarithmic,
ticks={
minor steps between steps=8,
major also at/.list={2,3,4,5,15,20}}},
radius axis={ticks={some, style=red!80!black}},
all axes=grid,
visualize as smooth line=sin]
data [format=function] {
var t : interval [-3:3];
func angle = exp(\value t);
func radius = \value{t}*\value{t};
};
\end{codeexample}
\subsection{Scientific Polar Axis System}
\label{section-dv-sci-polar-axes}
\begin{key}{/tikz/data visualization/scientific polar axes=\meta{options}}
This key installs a polar axis system that can be used in a ``scientific''
publication. Two axes are created called the |angle axis| and the
|radius axis|. Unlike ``normal'' Cartesian axes, these axes do not point in
a specific direction. Rather, the |radius axis| is used to map the values
of one attribute to a distance from the origin while the |angle axis| is
used to map the values of another attribute to a rotation angle.
The \meta{options} will be executed with the path prefix
%
\begin{codeexample}[code only]
/tikz/data visualization/scientific polar axes
\end{codeexample}
%
The permissible keys are documented in the later subsections of this
section.
Let us start with the configuration of the radius axis since it is easier.
Firstly, you should specify which attribute is linked to the radius. The
default is |radius|, but you will typically wish to change this. As with
any other axis, the |attribute| key is used to configure the axis, see
Section~\ref{section-dv-axis-attribute} for details. You can also apply all
other configurations to the radius axis like, say, |unit length| or
|length| or |style|. Note, however, that the |logarithmic| key will not
work with the radius axis for a |scientific polar axes| system since the
attribute value zero is always placed at the center -- and for a
logarithmic plot the value |0| cannot be mapped.
%
\begin{codeexample}[
width=8.8cm,
preamble={\usetikzlibrary{
datavisualization.formats.functions,
datavisualization.polar,
}},
]
\tikz \datavisualization [
scientific polar axes,
radius axis={
attribute=distance,
ticks={step=5000},
padding=1.5em,
length=3cm,
grid
},
visualize as smooth line]
data [format=function] {
var angle : interval [0:100];
func distance = \value{angle}*\value{angle};
};
\end{codeexample}
For the |angle axis|, you can also specify an attribute using the
|attribute| key. However, for this axis the mapping of a value to an actual
angle is a complicated process involving many considerations of how the
polar axis system should be visualized. For this reason, there are a large
number of predefined such mappings documented in
Section~\ref{section-dv-angle-ranges}. Finally, as for a |scientific plot|,
you can configure where the ticks should be shown using the keys
|inner ticks|, |outer ticks|, and |clean|, documented below.
\end{key}
\subsubsection{Tick Placements}
\begin{key}{/tikz/data visualization/scientific polar axes/outer ticks}
This key, which is the default, causes ticks to be drawn ``outside'' the
outer ``ring'' of the polar axes:
%
\begin{codeexample}[
width=8.8cm,
preamble={\usetikzlibrary{
datavisualization.formats.functions,
datavisualization.polar,
}},
]
\tikz \datavisualization [
scientific polar axes={outer ticks, 0 to 180},
visualize as smooth line]
data [format=function] {
var angle : interval [0:100];
func radius = \value{angle};
};
\end{codeexample}
%
\end{key}
\begin{key}{/tikz/data visualization/scientific polar axes/inner ticks}
This key causes the ticks to be ``turned to the inside''. I do not
recommend using this key.
%
\begin{codeexample}[
width=8.8cm,
preamble={\usetikzlibrary{
datavisualization.formats.functions,
datavisualization.polar,
}},
]
\tikz \datavisualization [
scientific polar axes={inner ticks, 0 to 180},
visualize as smooth line]
data [format=function] {
var angle : interval [0:100];
func radius = \value{angle};
};
\end{codeexample}
%
\end{key}
\begin{key}{/tikz/data visualization/scientific polar axes/clean}
This key separates the area where the data is shown from the area where the
ticks are shown. Usually, this is the best choice for the tick placement
since it avoids a collision of data and explanations.
%
\begin{codeexample}[
width=8.8cm,
preamble={\usetikzlibrary{
datavisualization.formats.functions,
datavisualization.polar,
}},
]
\tikz \datavisualization [
scientific polar axes={clean, 0 to 180},
visualize as smooth line]
data [format=function] {
var angle : interval [0:100];
func radius = \value{angle};
};
\end{codeexample}
%
\end{key}
\subsubsection{Angle Ranges}
\label{section-dv-angle-ranges}
Suppose you create a polar plot in which the radius values vary between, say,
$567$ and $1234$. Then the normal axis scaling mechanisms can be used to
compute a good scaling for the ``radius axis'': Place the value $1234$ at a
distance of , say, $5\,\mathrm{cm}$ from the origin and place the value $0$ at
the origin. Now, by comparison, suppose that the values of the angle axis's
attribute ranged between, say, $10$ and $75.7$. In this case, we may wish the
angles to be scaled so that the minimum value is horizontal and the maximum
value is vertical. But we may also wish the a value of $0$ is horizontal and a
value of $90$ is vertical.
Since it is unclear which interpretation is the right one, you have to use an
option to select which should happen. The applicable options fall into three
categories:
%
\begin{itemize}
\item Options that request the scaling to be done in such a way that the
attribute is interpreted as a value in degrees and such that the
minimum and maximum of the depicted range is a multiple of $90^\circ$.
For instance, the option |0 to 180| causes the angle axis to range from
$0^\circ$ to $180^\circ$, independently of the actual range of the
values.
\item Options that work as above, but use radians rather than degrees. An
example is the option |0 to pi|.
\item Options that map the minimum value in the data to a horizontal or
vertical line and the maximum value to another such line. This is
useful when the values neither directly correspond to degrees or
radians. In this case, the angle axis may also be a logarithmic axis.
\end{itemize}
In addition to the above categories, all of the options documented in the
following also implicitly select the quadrants that are used to depict the data.
For instance, the |0 to 90| key and also the |0 to pi half| key set up the polar
axis system in such a way that only the first (upper right) quadrant is used. No
check is done whether the data will actually lie in this quadrant -- if it does
not, the data will ``bleed outside'' the range. Naturally, with a key like
|0 to 360| or |0 to 2pi| this cannot happen.
In order to save some space in this manual, in the following the different
possible keys are only given in a table together with a small example for each
key. The examples were created using the following code:
%
\begin{codeexample}[preamble={\usetikzlibrary{datavisualization.polar}}]
\tikz \datavisualization [
scientific polar axes={
clean,
0 to 90 % the option
},
angle axis={ticks={step=30}},
radius axis={length=1cm, ticks={step=1}},
visualize as scatter]
data point [angle=20, radius=0.5]
data point [angle=30, radius=1]
data point [angle=40, radius=1.5];
\end{codeexample}
For the options on radians, the angle values have been replaced by |0.2|,
|0.3|, and |0.4| and the stepping has been changed by setting |step=(pi/6)|.
For the quadrant options, no stepping is set at all (it is computed
automatically).
\def\polarexample#1#2#3#4#5{%
\texttt{#1}%
\indexkey{/tikz/data visualization/scientific polar axes/#1}&
\tikz [baseline]{\path(-2.25cm,0)(2.25cm,0); \datavisualization [
scientific polar axes={clean, #1},
angle axis={ticks={#2}},
radius axis={length=1cm, ticks={step=1}},
visualize as scatter
]
data point [angle=#3, radius=0.5]
data point [angle=#4, radius=1]
data point [angle=#5, radius=1.5];
\path ([yshift=-1em]current bounding box.south);
}&
\tikz [baseline]{\path(-2.25cm,0)(2.25cm,0); \datavisualization [
scientific polar axes={outer ticks, #1},
angle axis={ticks={#2}},
radius axis={length=1cm, ticks={step=1}},
visualize as scatter
]
data point [angle=#3, radius=0.5]
data point [angle=#4, radius=1]
data point [angle=#5, radius=1.5];
\path ([yshift=-1em]current bounding box.south);
}
\\
}
\begin{tabular}{lcc}
\emph{Option} & \emph{With clean ticks} & \emph{With outer ticks} \\
\polarexample{0 to 90}{step=30}{20}{30}{40}
\polarexample{-90 to 0}{step=30}{20}{30}{40}
\polarexample{0 to 180}{step=30}{20}{30}{40}
\polarexample{-90 to 90}{step=30}{20}{30}{40}
\polarexample{0 to 360}{step=30}{20}{30}{40}
\polarexample{-180 to 180}{step=30}{20}{30}{40}
\end{tabular}
\begin{tabular}{lcc}
\emph{Option} & \emph{With clean ticks} & \emph{With outer ticks} \\
\polarexample{0 to pi half}{step=(pi/6)}{0.2}{0.3}{0.4}
\polarexample{-pi half to 0}{step=(pi/6)}{0.2}{0.3}{0.4}
\polarexample{0 to pi}{step=(pi/6)}{0.2}{0.3}{0.4}
\polarexample{-pi half to pi half}{step=(pi/6)}{0.2}{0.3}{0.4}
\polarexample{0 to 2pi}{step=(pi/6)}{0.2}{0.3}{0.4}
\polarexample{-pi to pi}{step=(pi/6)}{0.2}{0.3}{0.4}
\end{tabular}
\begin{tabular}{lcc}
\emph{Option} & \emph{With clean ticks} & \emph{With outer ticks} \\
\polarexample{quadrant}{}{20}{30}{40}
\polarexample{quadrant clockwise}{}{20}{30}{40}
\polarexample{fourth quadrant}{}{20}{30}{40}
\polarexample{fourth quadrant clockwise}{}{20}{30}{40}
\polarexample{upper half}{}{20}{30}{40}
\polarexample{upper half clockwise}{}{20}{30}{40}
\polarexample{lower half}{}{20}{30}{40}
\polarexample{lower half clockwise}{}{20}{30}{40}
\end{tabular}
\begin{tabular}{lcc}
\emph{Option} & \emph{With clean ticks} & \emph{With outer ticks} \\
\polarexample{left half}{}{20}{30}{40}
\polarexample{left half clockwise}{}{20}{30}{40}
\polarexample{right half}{}{20}{30}{40}
\polarexample{right half clockwise}{}{20}{30}{40}
\end{tabular}
\subsection{Advanced: Creating a New Polar Axis System}
\begin{key}{/tikz/data visualization/new polar axes=|\char`\{|\meta{angle axis name}|\char`\}||\char`\{|\meta{radius axis name}|\char`\}|}
    This key actually creates two axes, whose names are given as parameters: An
\emph{angle axis} and a \emph{radius axis}. These two axes work in concert
in the following way: Suppose a data point has two attributes called
|angle| and |radius| (these attribute names can be changed by changing the
|attribute| of the \meta{angle axis name} or the \meta{radius axis name},
respectively). These two attributes are then scaled as usual, resulting in
two ``reasonable'' values $a$ (for the angle) and $r$ (for the radius).
Then, the data point gets visualized (in principle, details will follow) at
a position on the page that is at a distance of $r$ from the origin and at
an angle of~$a$.
%
\begin{codeexample}[preamble={\usetikzlibrary{datavisualization.polar}}]
\tikz \datavisualization
[new polar axes={angle axis}{radius axis},
radius axis={length=2cm},
visualize as scatter]
data [format=named] {
angle={0,20,...,160}, radius={0,...,5}
};
\end{codeexample}
%
In detail, the \meta{angle axis} keeps track of two vectors $v_0$ and
$v_{90}$, each of which will usually have unit length (length |1pt|) and
which point in two different directions. Given a radius $r$ (measured in
\TeX\ |pt|s, so if the radius attribute is |10pt|, then $r$ would be $10$) and
an angle $a$, let $s$ be the sine of $a$ and let $c$ be the cosine of $a$,
where $a$ is a number in degrees (so $s$ would be $1$ for $a = 90$). Then,
the current page position is shifted by $c \cdot r$ times $v_0$ and,
additionally, by $s \cdot r$ times $v_{90}$. This means that in the ``polar
coordinate system'' $v_0$ is the unit vector along the ``$0^\circ$-axis''
and $v_{90}$ is the unit vector along ``$90^\circ$-axis''. The values of
$v_0$ and $v_{90}$ can be changed using the following key on the
\meta{angle axis}:
%
\begin{key}{/tikz/data visualization/axis options/unit vectors=%
|\char`\{|\meta{unit vector 0 degrees}|\char`\}\char`\{|\meta{unit vector 90 degrees}|\char`\}|
(initially {\char`\{(1pt,0pt)\char`\}\char`\{(0pt,1pt)\char`\}})%
}
Both the \meta{unit vector 0 degrees} and the \meta{unit vector 90 degrees}
are \tikzname\ coordinates:
%
\begin{codeexample}[preamble={\usetikzlibrary{datavisualization.polar}}]
\tikz \datavisualization
[new polar axes={angle axis}{radius axis},
radius axis={unit length=1cm},
angle axis={unit vectors={(10:1pt)}{(60:1pt)}},
visualize as scatter]
data [format=named] {
angle={0,90}, radius={0.25,0.5,...,2}
};
\end{codeexample}
\end{key}
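    To restate the displacement rule compactly: with $c = \cos a$ and
    $s = \sin a$, a data point with scaled angle $a$ and scaled radius $r$ is
    displaced from the origin by
    \[
        c \cdot r \cdot v_0 \;+\; s \cdot r \cdot v_{90}.
    \]
    This is merely a summary of the mapping described above; no additional
    key is involved.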
\end{key}
Once created, the |angle axis| can be scaled conveniently using the following
keys:
\begin{key}{/tikz/data visualization/axis options/degrees}
When this key is passed to the angle axis of a polar axis system, it sets
up the scaling so that a value of |360| on this axis corresponds to a
complete circle.
%
\begin{codeexample}[preamble={\usetikzlibrary{datavisualization.polar}}]
\tikz \datavisualization
[new polar axes={angle axis}{radius axis},
radius axis={unit length=1cm},
angle axis={degrees},
visualize as scatter]
data [format=named] {
angle={10,90}, radius={0.25,0.5,...,2}
};
\end{codeexample}
%
\end{key}
\begin{key}{/tikz/data visualization/axis options/radians}
In contrast to |degrees|, this option sets up things so that a value of
|2*pi| on this axis corresponds to a complete circle.
%
\begin{codeexample}[preamble={\usetikzlibrary{datavisualization.polar}}]
\tikz \datavisualization
[new polar axes={angle axis}{radius axis},
radius axis={unit length=1cm},
angle axis={radians},
visualize as scatter]
data [format=named] {
angle={0,1.5}, radius={0.25,0.5,...,2}
};
\end{codeexample}
%
\end{key}
| {
"alphanum_fraction": 0.6938822713,
"avg_line_length": 37.6775599129,
"ext": "tex",
"hexsha": "bbba69dee468f53449c470e28223626970240540",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "52fe6e0cd5af6b4610fd344a7392cca11bc5a72e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "waqas4afzal/LatexUrduBooksTools",
"max_forks_repo_path": "Texlive_Windows_x32/2020/texmf-dist/doc/generic/pgf/text-en/pgfmanual-en-dv-polar.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "52fe6e0cd5af6b4610fd344a7392cca11bc5a72e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "waqas4afzal/LatexUrduBooksTools",
"max_issues_repo_path": "Texlive_Windows_x32/2020/texmf-dist/doc/generic/pgf/text-en/pgfmanual-en-dv-polar.tex",
"max_line_length": 138,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "52fe6e0cd5af6b4610fd344a7392cca11bc5a72e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "waqas4afzal/LatexUrduBooksTools",
"max_stars_repo_path": "Texlive_Windows_x32/2020/texmf-dist/doc/generic/pgf/text-en/pgfmanual-en-dv-polar.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 5055,
"size": 17294
} |
% !TEX root = ./Basilisk-MODULENAME-yyyymmdd.tex
\section{Test Description and Success Criteria}
Describe the unit test(s) in here.
\subsection{Check 1}
There could be subsections for various checks done within the unit test.
\section{Test Parameters}
Test and simulation parameters and inputs go here. Basically, describe your test in the section above, but put any specific numbers or inputs to the tests in this section.
The unit test verifies that the module output guidance message vectors match expected values.
\begin{table}[htbp]
\caption{Error tolerance for each test.}
\label{tab:errortol}
\centering \fontsize{10}{10}\selectfont
\begin{tabular}{ c | c } % Column formatting,
\hline\hline
\textbf{Output Value Tested} & \textbf{Tolerated Error} \\
\hline
{\tt outputVector} & \input{AutoTeX/toleranceValue} \\
\hline\hline
\end{tabular}
\end{table}
\section{Test Results}
The results of the unit test should be included in the documentation. The results can be discussed verbally, but also included as tables and figures.
All of the tests passed:
\begin{table}[H]
\caption{Test results}
\label{tab:results}
\centering \fontsize{10}{10}\selectfont
\begin{tabular}{c | c } % Column formatting,
\hline\hline
\textbf{Check} &\textbf{Pass/Fail} \\
\hline
1 & \input{AutoTeX/passFail11} \\
2 & \input{AutoTeX/passFail13} \\
3 & \input{AutoTeX/passFail22} \\
\hline\hline
\end{tabular}
\end{table}
\subsection{Unit Test Table Results}
To automatically create a unit test table to include in the documentation, use the command:
\begin{verbatim}
unitTestSupport.writeTableLaTeX(
tableName,
tableHeaders,
caption,
dataMatrix,
path)
\end{verbatim}
Here are the sample \TeX\ table form the unit tests.
\input{AutoTeX/test11.tex}
\input{AutoTeX/test13.tex}
\input{AutoTeX/test22.tex}
\subsection{Unit Test Figure Results}
If figures and plots are generated in the python unit tests, these can also be automatically included in the unit test documentation. This is achieved with the command:
\begin{verbatim}
unitTestSupport.writeFigureLaTeX(
"testPlot",
"Illustration of Sample Plot",
plt,
"width=0.5\\textwidth",
path)
\end{verbatim}
\input{AutoTeX/testPlot11.tex}
\input{AutoTeX/testPlot13.tex}
\input{AutoTeX/testPlot22.tex}
| {
"alphanum_fraction": 0.7353819889,
"avg_line_length": 26.0333333333,
"ext": "tex",
"hexsha": "49419e4300a21a01911bee87535dd24a0f0c9bc5",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14",
"max_forks_repo_licenses": [
"0BSD"
],
"max_forks_repo_name": "ian-cooke/basilisk_mag",
"max_forks_repo_path": "src/fswAlgorithms/_fswTemplateFolder/fswModuleTemplate/_Documentation/secTest.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14",
"max_issues_repo_issues_event_max_datetime": "2019-03-13T20:52:22.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-03-13T20:52:22.000Z",
"max_issues_repo_licenses": [
"0BSD"
],
"max_issues_repo_name": "ian-cooke/basilisk_mag",
"max_issues_repo_path": "src/fswAlgorithms/_fswTemplateFolder/fswModuleTemplate/_Documentation/secTest.tex",
"max_line_length": 171,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14",
"max_stars_repo_licenses": [
"0BSD"
],
"max_stars_repo_name": "ian-cooke/basilisk_mag",
"max_stars_repo_path": "src/fswAlgorithms/_fswTemplateFolder/fswModuleTemplate/_Documentation/secTest.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 665,
"size": 2343
} |
\chapter{Writing ImageIO Plugins}
\label{chap:writingplugins}
\section{Plugin Introduction}
\label{sec:pluginintro}
As explained in Chapters~\ref{chap:imageinput} and
\ref{chap:imageoutput}, the ImageIO library does not know how to read or
write any particular image formats, but rather relies on plugins located
and loaded dynamically at run-time. This set of plugins, and therefore
the set of image file formats that \product or its clients can read and
write, is extensible without needing to modify \product itself.
This chapter explains how to write your own \product plugins. We will
first explain separately how to write image file readers and writers,
then tie up the loose ends of how to build the plugins themselves.
\section{Image Readers}
\label{sec:pluginreaders}
A plugin that reads a particular image file format must implement a
\emph{subclass} of \ImageInput (described in
Chapter~\ref{chap:imageinput}). This is actually very straightforward
and consists of the following steps, which we will illustrate with a
real-world example of writing a JPEG/JFIF plug-in.
\begin{enumerate}
\item Read the base class definition from {\fn imageio.h}. It may also
be helpful to enclose the contents of your plugin in the same
namespace that the \product library uses:
\begin{code}
#include <OpenImageIO/imageio.h>
OIIO_PLUGIN_NAMESPACE_BEGIN
... everything else ...
OIIO_PLUGIN_NAMESPACE_END
\end{code}
\item Declare three public items:
\begin{enumerate}
\item An integer called \emph{name}{\cf _imageio_version} that identifies
the version of the ImageIO protocol implemented by the plugin,
defined in {\fn imageio.h} as the constant {\cf OIIO_PLUGIN_VERSION}.
This allows the library to be sure it is not loading a plugin
that was compiled against an incompatible version of \product.
\item A function named \emph{name}{\cf _input_imageio_create} that
takes no arguments and returns a new instance of your \ImageInput
subclass. (Note that \emph{name} is the name of your format,
and must match the name of the plugin itself.)
\item An array of {\cf char *} called \emph{name}{\cf _input_extensions}
that contains the list of file extensions that are likely to indicate
a file of the right format. The list is terminated by a {\cf NULL}
pointer.
\end{enumerate}
All of these items must be inside an `{\cf extern "C"}' block in order
to avoid name mangling by the C++ compiler, and we provide handy
macros {\cf OIIO_PLUGIN_EXPORTS_BEGIN} and {\cf OIIO_PLUGIN_EXPORTS_END}
to make this easy. Depending on your
compiler, you may need to use special commands to dictate that the
symbols will be exported in the DSO; we provide a special {\cf
OIIO_EXPORT} macro for this purpose, defined in {\fn export.h}.
Putting this all together, we get the following for our JPEG example:
\begin{code}
OIIO_PLUGIN_EXPORTS_BEGIN
OIIO_EXPORT int jpeg_imageio_version = OIIO_PLUGIN_VERSION;
OIIO_EXPORT JpgInput *jpeg_input_imageio_create () {
return new JpgInput;
}
OIIO_EXPORT const char *jpeg_input_extensions[] = {
"jpg", "jpe", "jpeg", NULL
};
OIIO_PLUGIN_EXPORTS_END
\end{code}
\item The definition and implementation of an \ImageInput subclass for
this file format. It must publicly inherit \ImageInput, and must
overload the following methods which are ``pure virtual'' in the
\ImageInput base class:
\begin{enumerate}
\item {\cf format_name()} should return the name of the format, which
ought to match the name of the plugin and by convention is
strictly lower-case and contains no whitespace.
\item {\cf open()} should open the file and return true, or should
return false if unable to do so (including if the file was found
but turned out not to be in the format that your plugin is trying
to implement).
\item {\cf close()} should close the file, if open.
\item {\cf read_native_scanline} should read a single scanline from
the file into the address provided, uncompressing it but
keeping it in its native data format without any translation.
\item The virtual destructor, which should {\cf close()} if the file
is still open, in addition to performing any other tear-down activities.
\end{enumerate}
Additionally, your \ImageInput subclass may optionally choose to
overload any of the following methods, which are defined in the
\ImageInput base class and only need to be overloaded if the default
behavior is not appropriate for your plugin:
\begin{enumerate}
\item[(f)] {\cf supports()}, only if your format supports any of
the optional features described in
Section~\ref{sec:inputsupportsfeaturelist}.
\item[(g)] {\cf valid_file()}, if your format has a way to
determine if a file is of the given format in a way that is less
expensive than a full {\cf open()}.
\item[(h)] {\cf seek_subimage()}, only if your format supports
reading multiple subimages within a single file.
\item[(i)] {\cf read_native_scanlines()}, only if your format has a speed
advantage when reading multiple scanlines at once. If you do not
supply this function, the default implementation will simply call
{\cf read_native_scanline()} for each scanline in the range.
\item[(j)] {\cf read_native_tile()}, only if your format supports
reading tiled images.
\item[(k)] {\cf read_native_tiles()}, only if your format supports
reading tiled images and there is a speed advantage when reading
multiple tiles at once. If you do not supply this function, the
default implementation will simply call {\cf read_native_tile()} for each
tile in the range.
\item[(l)] ``Channel subset'' versions of {\cf read_native_scanlines()}
and/or {\cf read_native_tiles()}, only if your format has a more
efficient means of reading a subset of channels. If you do not
supply these methods, the default implementation will simply use
{\cf read_native_scanlines()} or {\cf read_native_tiles()} to read
into a temporary all-channel buffer and then copy the channel
subset into the user's buffer.
\item[(m)] {\cf read_native_deep_scanlines()} and/or
{\cf read_native_deep_tiles()}, only if your format supports
``deep'' data images.
\end{enumerate}
Here is how the class definition looks for our JPEG example. Note
that the JPEG/JFIF file format does not support multiple subimages
or tiled images.
\begin{code}
class JpgInput : public ImageInput {
public:
JpgInput () { init(); }
virtual ~JpgInput () { close(); }
virtual const char * format_name (void) const { return "jpeg"; }
virtual bool open (const std::string &name, ImageSpec &spec);
virtual bool read_native_scanline (int y, int z, void *data);
virtual bool close ();
private:
FILE *m_fd;
bool m_first_scanline;
struct jpeg_decompress_struct m_cinfo;
struct jpeg_error_mgr m_jerr;
void init () { m_fd = NULL; }
};
\end{code}
\end{enumerate}
Your subclass implementations of {\cf open()}, {\cf close()}, and {\cf
read_native_scanline()} are the heart of an \ImageInput
implementation. (Also {\cf read_native_tile()} and {\cf
seek_subimage()}, for those image formats that support them.)
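As a rough illustration of the expected control flow, here is a hedged
sketch of an {\cf open()} for a hypothetical reader (the class name
{\cf MyInput} and the hard-coded header values are placeholders; a real
reader would parse them from the file, and would return {\cf false} if the
file turned out not to be in its format):
\begin{code}
    bool MyInput::open (const std::string &name, ImageSpec &newspec)
    {
        m_fd = fopen (name.c_str(), "rb");
        if (! m_fd) {
            error ("Could not open \"%s\"", name.c_str());
            return false;
        }
        // ... parse the file header here; if it is not our format,
        // close m_fd and return false ...
        int width = 640, height = 480, nchannels = 3;   // placeholders
        m_spec = ImageSpec (width, height, nchannels, TypeDesc::UINT8);
        newspec = m_spec;
        return true;
    }
\end{code}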
The remainder of this section simply lists the full implementation of
our JPEG reader, which relies heavily on the open source {\fn jpeg-6b}
library to perform the actual JPEG decoding.
\includedcode{../jpeg.imageio/jpeginput.cpp}
\section{Image Writers}
\label{sec:pluginwriters}
A plugin that writes a particular image file format must implement a
\emph{subclass} of \ImageOutput (described in
Chapter~\ref{chap:imageoutput}). This is actually very straightforward
and consists of the following steps, which we will illustrate with a
real-world example of writing a JPEG/JFIF plug-in.
\begin{enumerate}
\item Read the base class definition from {\fn imageio.h}, just as
with an image reader (see Section~\ref{sec:pluginreaders}).
\item Declare three public items:
\begin{enumerate}
\item An integer called \emph{name}{\cf _imageio_version} that identifies
the version of the ImageIO protocol implemented by the plugin,
defined in {\fn imageio.h} as the constant {\cf OIIO_PLUGIN_VERSION}.
This allows the library to be sure it is not loading a plugin
that was compiled against an incompatible version of \product.
Note that if your plugin has both a reader and writer and they
are compiled as separate modules (C++ source files), you don't
want to declare this in \emph{both} modules; either one is fine.
\item A function named \emph{name}{\cf _output_imageio_create} that
takes no arguments and returns a new instance of your \ImageOutput
subclass. (Note that \emph{name} is the name of your format,
and must match the name of the plugin itself.)
\item An array of {\cf char *} called \emph{name}{\cf _output_extensions}
that contains the list of file extensions that are likely to indicate
a file of the right format. The list is terminated by a {\cf NULL}
pointer.
\end{enumerate}
All of these items must be inside an `{\cf extern "C"}' block in order
to avoid name mangling by the C++ compiler, and we provide handy
macros {\cf OIIO_PLUGIN_EXPORTS_BEGIN} and {\cf OIIO_PLUGIN_EXPORTS_END}
to make this easy. Depending on your
compiler, you may need to use special commands to dictate that the
symbols will be exported in the DSO; we provide a special {\cf
OIIO_EXPORT} macro for this purpose, defined in {\fn export.h}.
Putting this all together, we get the following for our JPEG example:
\begin{code}
OIIO_PLUGIN_EXPORTS_BEGIN
OIIO_EXPORT int jpeg_imageio_version = OIIO_PLUGIN_VERSION;
OIIO_EXPORT JpgOutput *jpeg_output_imageio_create () {
return new JpgOutput;
}
OIIO_EXPORT const char *jpeg_output_extensions[] = {
"jpg", "jpe", "jpeg", NULL
};
OIIO_PLUGIN_EXPORTS_END
\end{code}
\item The definition and implementation of an \ImageOutput subclass for
this file format. It must publicly inherit \ImageOutput, and must
overload the following methods which are ``pure virtual'' in the
\ImageOutput base class:
\begin{enumerate}
\item {\cf format_name()} should return the name of the format, which
ought to match the name of the plugin and by convention is
strictly lower-case and contains no whitespace.
\item {\cf supports()} should return {\cf true} if its argument
names a feature supported by your format plugin, {\cf false} if it
names a feature not supported by your plugin. See
Section~\ref{sec:supportsfeaturelist} for the list of feature
names.
\item {\cf open()} should open the file and return true, or should
return false if unable to do so (including if the file was found
but turned out not to be in the format that your plugin is trying
to implement).
\item {\cf close()} should close the file, if open.
\item {\cf write_scanline} should write a single scanline to
the file, translating from internal to native data format and
handling strides properly.
\item The virtual destructor, which should {\cf close()} if the file
is still open, in addition to performing any other tear-down activities.
\end{enumerate}
Additionally, your \ImageOutput subclass may optionally choose to
overload any of the following methods, which are defined in the
\ImageOutput base class and only need to be overloaded if the default
behavior is not appropriate for your plugin:
\begin{enumerate}
\item[(g)] {\cf write_scanlines()}, only if your format supports
writing scanlines and you can get a performance improvement when
outputting multiple scanlines at once. If you don't supply
{\cf write_scanlines()}, the default implementation will simply
call {\cf write_scanline()} separately for each scanline in the
range.
\item[(h)] {\cf write_tile()}, only if your format supports
writing tiled images.
\item[(i)] {\cf write_tiles()}, only if your format supports
writing tiled images and you can get a performance improvement
when outputting multiple tiles at once. If you don't supply
{\cf write_tiles()}, the default implementation will simply
call {\cf write_tile()} separately for each tile in the range.
\item[(j)] {\cf write_rectangle()}, only if your format supports
writing arbitrary rectangles.
\item[(k)] {\cf write_image()}, only if you have a more clever
method of doing so than the default implementation that calls
{\cf write_scanline()} or {\cf write_tile()} repeatedly.
\item[(l)] {\cf write_deep_scanlines()} and/or
{\cf write_deep_tiles()}, only if your format supports
``deep'' data images.
\end{enumerate}
It is not strictly required, but certainly appreciated, for a file format
that does not support tiles to nonetheless accept an \ImageSpec that
specifies tile sizes: allocate a full-image buffer in {\cf open()}, provide
an implementation of {\cf write_tile()} that copies each tile of data to the
right spot in the buffer, and have {\cf close()} then call
{\cf write_scanlines()} to process the buffer once the image has been
fully sent.
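For illustration, here is a hedged sketch of that approach (the class
{\cf MyOutput} and its members {\cf m_tilebuffer} and {\cf m_scratch} are
hypothetical, full non-edge tiles are assumed for brevity, and the
{\cf to_native_tile()} helper is described later in this chapter):
\begin{code}
    bool MyOutput::write_tile (int x, int y, int z, TypeDesc format,
                               const void *data, stride_t xstride,
                               stride_t ystride, stride_t zstride)
    {
        // Convert the caller's tile to contiguous pixels in the native
        // format, then copy it into place in the whole-image buffer
        // (m_tilebuffer was resized in open() to hold the whole image).
        data = to_native_tile (format, data, xstride, ystride, zstride,
                               m_scratch);
        size_t pixelsize = m_spec.pixel_bytes ();
        size_t rowbytes  = m_spec.tile_width * pixelsize;
        for (int row = 0; row < m_spec.tile_height; ++row) {
            size_t offset = ((y - m_spec.y + row) * m_spec.width
                             + (x - m_spec.x)) * pixelsize;
            // memcpy requires <cstring>
            memcpy (&m_tilebuffer[offset],
                    (const char *)data + row * rowbytes, rowbytes);
        }
        return true;
    }

    bool MyOutput::close ()
    {
        if (! m_tilebuffer.empty()) {
            // Flush the buffered image to the file as scanlines.
            write_scanlines (m_spec.y, m_spec.y + m_spec.height, 0,
                             m_spec.format, &m_tilebuffer[0]);
            m_tilebuffer.clear ();
        }
        // ... finish writing and close the file as usual ...
        return true;
    }
\end{code}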
Here is how the class definition looks for our JPEG example. Note
that the JPEG/JFIF file format does not support multiple subimages
or tiled images.
\begin{code}
class JpgOutput : public ImageOutput {
public:
JpgOutput () { init(); }
virtual ~JpgOutput () { close(); }
virtual const char * format_name (void) const { return "jpeg"; }
virtual int supports (string_view property) const { return false; }
virtual bool open (const std::string &name, const ImageSpec &spec,
bool append=false);
virtual bool write_scanline (int y, int z, TypeDesc format,
const void *data, stride_t xstride);
bool close ();
private:
FILE *m_fd;
std::vector<unsigned char> m_scratch;
struct jpeg_compress_struct m_cinfo;
struct jpeg_error_mgr m_jerr;
void init () { m_fd = NULL; }
};
\end{code}
\end{enumerate}
Your subclass implementations of {\cf open()}, {\cf close()}, and {\cf
write_scanline()} are the heart of an \ImageOutput implementation.
(Also {\cf write_tile()}, for those image formats that support tiled
output.)
An \ImageOutput implementation must properly handle all data formats and
strides passed to {\cf write_scanline()} or {\cf write_tile()}, unlike
an \ImageInput implementation, which only needs to read scanlines or
tiles in their native format and then have the super-class handle the
translation. But don't worry, all the heavy lifting can be accomplished
with the following helper functions provided as protected member
functions of \ImageOutput that convert a scanline, tile, or rectangular
array of values from one format to the native format(s) of the file.
\apiitem{const void * {\ce to_native_scanline} (TypeDesc format, const void *data, \\
\bigspc stride_t xstride, std::vector<unsigned char> \&scratch, \\
\bigspc unsigned int dither=0,
int yorigin=0, int zorigin=0)}
Convert a full scanline of pixels (pointed to by \emph{data}) with the
given \emph{format} and strides into contiguous pixels in the native
format (described by the \ImageSpec returned by the {\cf spec()} member
function). The location of the newly converted data is returned, which
may either be the original \emph{data} itself if no data conversion was
necessary and the requested layout was contiguous (thereby avoiding
unnecessary memory copies), or may point into memory allocated within
the \emph{scratch} vector passed by the user. In either case, the
caller doesn't need to worry about thread safety or freeing any
allocated memory (other than eventually destroying the scratch vector).
\apiend
\apiitem{const void * {\ce to_native_tile} (TypeDesc format, const void *data,\\
\bigspc stride_t xstride, stride_t ystride, stride_t zstride,\\
\bigspc std::vector<unsigned char> \&scratch,
unsigned int dither=0, \\ \bigspc int xorigin=0,
int yorigin=0, int zorigin=0)}
Convert a full tile of pixels (pointed to by \emph{data}) with the given
\emph{format} and strides into contiguous pixels in the native format
(described by the \ImageSpec returned by the {\cf spec()} member
function). The location of the newly converted data is returned, which
may either be the original \emph{data} itself if no data conversion was
necessary and the requested layout was contiguous (thereby avoiding
unnecessary memory copies), or may point into memory allocated within
the \emph{scratch} vector passed by the user. In either case, the
caller doesn't need to worry about thread safety or freeing any
allocated memory (other than eventually destroying the scratch vector).
\apiend
\apiitem{const void * {\ce to_native_rectangle} (int xbegin, int xend, \\
\bigspc int ybegin, int yend,
int zbegin, int zend, \\ \bigspc
TypeDesc format, const void
*data, \\ \bigspc
stride_t xstride, stride_t ystride,
stride_t zstride, \\ \bigspc
std::vector<unsigned char> \&scratch,
unsigned int dither=0, \\ \bigspc int xorigin=0,
int yorigin=0, int zorigin=0)}
Convert a rectangle of pixels (pointed to by \emph{data}) with the given
\emph{format}, dimensions, and strides into contiguous pixels in the
native format (described by the \ImageSpec returned by the {\cf spec()}
member function). The location of the newly converted data is returned,
which may either be the original \emph{data} itself if no data
conversion was necessary and the requested layout was contiguous
(thereby avoiding unnecessary memory copies), or may point into memory
allocated within the \emph{scratch} vector passed by the user. In
either case, the caller doesn't need to worry about thread safety or
freeing any allocated memory (other than eventually destroying the
scratch vector).
\apiend
For {\cf float}-to-8-bit integer conversions only, if the {\cf dither}
parameter is nonzero, random dither will be added to reduce quantization
banding artifacts; in this case, the specific nonzero {\cf dither} value is
used as a seed for the hash function that produces the per-pixel dither
amounts, and the optional {\cf origin} parameters help align the pixels to
the right position in the dither pattern.
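As a usage illustration, a writer's {\cf write_scanline()} typically funnels
the caller's pixels through {\cf to_native_scanline()} before handing them to
the format-specific encoder. In this hedged sketch, {\cf MyOutput} and
{\cf encode_row()} are hypothetical, while {\cf m_scratch} is the scratch
vector held by the class (as in the JPEG example earlier):
\begin{code}
    bool MyOutput::write_scanline (int y, int z, TypeDesc format,
                                   const void *data, stride_t xstride)
    {
        // Compute a concrete xstride if the caller passed AutoStride.
        m_spec.auto_stride (xstride, format, spec().nchannels);
        // Convert to contiguous pixels in the file's native format; the
        // result points either at the caller's data or into m_scratch.
        data = to_native_scanline (format, data, xstride, m_scratch);
        return encode_row (y, data);   // hypothetical per-format encoder
    }
\end{code}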
\bigskip
\bigskip
\noindent
The remainder of this section simply lists the full implementation of
our JPEG writer, which relies heavily on the open source {\fn jpeg-6b}
library to perform the actual JPEG encoding.
\includedcode{../jpeg.imageio/jpegoutput.cpp}
\section{Tips and Conventions}
\label{sec:plugintipsconventions}
\product's main goal is to hide all the pesky details of individual file
formats from the client application. This inevitably leads to various
mismatches between a file format's true capabilities and requests that
may be made through the \product APIs. This section outlines
conventions, tips, and rules of thumb that we recommend for image file
support.
\subsection*{Readers}
\begin{itemize}
\item If the file format stores images in a non-spectral color space
(for example, YUV), the reader should automatically convert to RGB to
pass through the OIIO APIs. In such a case, the reader should signal
the file's true color space via a \qkw{Foo:colorspace} attribute in
the \ImageSpec.
\item ``Palette'' images should be automatically converted by the reader
to RGB.
\item If the file supports thumbnail images in its header, the reader
should store the thumbnail dimensions in attributes
\qkw{thumbnail_width}, \qkw{thumbnail_height}, and
\qkw{thumbnail_nchannels} (all of which should be {\cf int}), and the
thumbnail pixels themselves in \qkw{thumbnail_image} as an array of
channel values (the array length is the total number of channel
samples in the thumbnail); a minimal sketch of this convention follows
this list.
\end{itemize}
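For example, a reader that found a thumbnail in the header might record it
along these lines (a hedged sketch; {\cf thumb_w}, {\cf thumb_h},
{\cf thumb_nchans}, and {\cf thumb_pixels} are assumed to have been filled
in while parsing the header):
\begin{code}
    // Inside the reader's open(), after parsing the header:
    m_spec.attribute ("thumbnail_width",  thumb_w);
    m_spec.attribute ("thumbnail_height", thumb_h);
    m_spec.attribute ("thumbnail_nchannels", thumb_nchans);
    int nvals = thumb_w * thumb_h * thumb_nchans;
    m_spec.attribute ("thumbnail_image",
                      TypeDesc(TypeDesc::UINT8, nvals), &thumb_pixels[0]);
\end{code}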
\subsection*{Writers}
The overall rule of thumb is: try to always ``succeed'' at writing the
file, outputting the closest possible approximation of the user's data.
But it is permissible to fail the {\cf open()} call if it is
clearly nonsensical or there is no possible way to output a decent
approximation of the user's data. Some tips:
\begin{itemize}
\item If the client application requests a data format not directly
supported by the file type, silently write the supported data format
that will result in the least precision or range loss.
\item It is customary to fail a call to {\cf open()} if the \ImageSpec
requested a number of color channels plainly not supported by the
file format. As an exception to this rule, it is permissible for a
file format that does not support alpha channels to silently drop
the fourth (alpha) channel of a 4-channel output request.
\item If the app requests a \qkw{Compression} not supported by the file
format, you may choose as a default any lossless compression
supported. Do not use a lossy compression unless you are fairly
certain that the app wanted a lossy compression.
\item If the file format is able to store images in a non-spectral color
space (for example, YUV), the writer may accept a \qkw{Foo:colorspace}
attribute in the \ImageSpec as a request to automatically convert and
store the data in that format (but it will always be passed as RGB
through the OIIO APIs).
\item If the file format can support thumbnail images in its header, and
the \ImageSpec contain attributes \qkw{thumbnail_width},
\qkw{thumbnail_height}, \qkw{thumbnail_nchannels}, and
\qkw{thumbnail_image}, the writer should attempt to store the
thumbnail if possible.
\end{itemize}
\section{Building ImageIO Plugins}
\label{sec:buildingplugins}
FIXME -- spell out how to compile and link plugins on each of the major
platforms.
\chapwidthend
\setchapterpreamble[u]{\margintoc}
\chapter{Class Options}
\labch{options}
In this chapter I will describe the most common options used, both the
ones inherited from \Class{scrbook} and the \Class{kao}-specific ones.
Options passed to the class modify its default behaviour; beware,
though, that some options may lead to unexpected results\ldots
\section{\Class{KOMA} Options}
The \Class{kaobook} class is based on \Class{scrbook}, therefore it
understands all of the options you would normally pass to that class. If
you have a lot of patience, you can read the \KOMAScript\xspace
guide.\sidenote{The guide can be downloaded from
\url{https://ctan.org/pkg/koma-script?lang=en}.} In fact, reading that
guide is recommended, as it is very instructive.
Every \KOMAScript\xspace option you pass to the class when you load it
is automatically activated. In addition, in \Class{kaobook} some options
have modified default values. For instance, the font size is 9.5pt and
the paragraphs are separated by space,\sidenote[-7mm][]{To be precise,
they are separated by half a line worth of space: the \Option{parskip}
value is \enquote{half}.} not marked by indentation.
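For example, to override the default font size you might load the class as
follows (the value shown is just an illustration):
\begin{verbatim}
\documentclass[fontsize=10pt]{kaobook}
\end{verbatim}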
\section{\Class{kao} Options}
In the future I plan to add more options to set the paragraph formatting
(justified or ragged) and the position of the margins (inner or outer in
twoside mode, left or right in oneside mode).\sidenote{As of now,
paragraphs are justified, formatted with \Command{singlespacing} (from
the \Package{setspace} package) and \Command{frenchspacing}.}
I take this opportunity to renew the call for help: everyone is
encouraged to add features or reimplement existing ones, and to send me
the results. You can find the GitHub repository at
\url{https://github.com/fmarotta/kaobook}.
\begin{kaobox}[frametitle=To Do]
Implement the \Option{justified} and \Option{margin} options. To be
consistent with the \KOMAScript\xspace style, they should accept a
simple switch as a parameter, where the simple switch should be
\Option{true} or \Option{false}, or one of the other standard values for
simple switches supported by \KOMAScript. See the \KOMAScript\xspace
documentation for further information.
\end{kaobox}
The above box is an example of a \Environment{kaobox}, which will be
discussed more thoroughly in \frefch{mathematics}. Throughout the book I
shall use these boxes to remark on what still needs to be done.
\section{Other Things Worth Knowing}
A bunch of packages are already loaded in the class because they are
needed for the implementation. These include:
\begin{itemize}
\item etoolbox
\item calc
\item xifthen
\item xkeyval
\item xparse
\item xstring
\end{itemize}
Many more packages are loaded, but they will be discussed in due time.
Here, we will mention only one more set of packages, needed to change
the paragraph formatting (recall that in the future there will be
options to change this). In particular, the packages we load are:
\begin{itemize}
\item ragged2e
\item setspace
\item hyphenat
\item microtype
\item needspace
\item xspace
\item xcolor (with options \Option{usenames,dvipsnames})
\end{itemize}
Some of the above packages do not concern paragraph formatting, but we
nevertheless grouped them with the others. By default, the main text is
justified and formatted with singlespacing and frenchspacing; the margin
text is the same, except that the font is a bit smaller.
\section{Document Structure}
We provide optional arguments to the \Command{title} and
\Command{author} commands so that you can insert short, plain text
versions of these fields, which can be used, typically in the half-title
or somewhere else in the front matter, through the commands
\Command{@plaintitle} and \Command{@plainauthor}, respectively. The PDF
properties \Option{pdftitle} and \Option{pdfauthor} are automatically
set by hyperref to the plain values if present, otherwise to the normal
values.\sidenote[-1.4cm][]{We think that this is an important point so
we remark it here. If you compile the document with pdflatex, the PDF
metadata will be altered so that they match the plain title and author
you have specified; if you did not specify them, the metadata will be
set to the normal title and author.}
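For example (with purely illustrative values):
\begin{verbatim}
\title[Plain Title]{A \textsc{Fancy} Title\\ with Formatting}
\author[Jane Doe]{Jane \textsc{Doe}}
\end{verbatim}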
Two page layouts, \Option{margin} and \Option{wide}, and two page styles,
\Option{plain} and \Option{fancy}, are defined. The layout basically
concerns the width of the margins, while the style refers to headers and
footers; these issues will be
discussed in \frefch{layout}.\sidenote{For now, suffice it to say that pages with
the \Option{margin} layout have wide margins, while with the
\Option{wide} layout the margins are absent. In \Option{plain} pages the
headers and footers are suppressed, while in \Option{fancy} pages there
is a header.}
The commands \Command{frontmatter}, \Command{mainmatter}, and
\Command{backmatter} have been redefined in order to automatically
change page layout and style for these sections of the book. The front
matter uses the \Option{margin} layout and the \Option{plain} page
style. In the mainmatter the margins are wide and the headings are
fancy. In the appendix the style and the layout do not change; however
we use \Command{bookmarksetup\{startatroot\}} so that the bookmarks of
the chapters are on the root level (without this, they would be under
the preceding part). In the backmatter the margins shrink again and we
also reset the bookmarks root.
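A minimal sketch of how these commands are typically used in a kaobook
document (chapter names are placeholders):
\begin{verbatim}
\frontmatter   % margin layout, plain page style
\tableofcontents
\mainmatter    % wide margins and fancy headings
\chapter{A Chapter}
% ... the body of the book ...
\appendix      % bookmarks restart at the root level
\chapter{An Appendix}
\backmatter    % margins shrink again, bookmarks root reset
\end{verbatim}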
\chapter{\textit{$\pi$SOD-M} Environment}
\label{chap:environment}
\epigraph{``\textit{The computer was born to solve problems that did not exist
before.}''}{Bill Gates}
This chapter describes the $\pi$SOD-M environment for developing reliable
service-based systems using the methodology we propose. The environment was
developed to support the $\pi$SOD-M methodology: the $\pi$SOD-M based process
consists of generating an application starting from a \textit{$\pi$-UseCase}
model and then transforming it into a series of models at the PIM and PSM
levels, before finally generating the code that implements the application.
% that implements
% the generation of {\em Policy} based services' compositions. For a given
% services' based application, the process consists in generating an application
% starting from a $\pi$-UseCase modeling. From a $\pi$-UseCase model,
% transformations between models are made until the platform specific model level
% to generate service composition specification code. Four model to model
% transformations are performed, three transformations between models, and one
% model to text transformation generates the service composition specification in
% $\pi$-PEWS language. All transformations are done through an Eclipse plugin. All
% models definition are the result of a general process defined by the $\pi$SOD-M
% method in which a set of models are built following a service oriented approach.
The $\pi$SOD-M environment is built on top of the Eclipse framework
(http://www.eclipse.org), a framework for building Integrated Development
Environments (IDEs). We also use the Eclipse Modelling Framework (EMF), a
meta-modelling framework that was devised to be extended and that provides
the utilities needed to define, edit and handle (meta-)models. To automate
the model transformations we use the ATL language\footnote{ATL uses EMF to
handle models (that is, to serialize, navigate and modify them). Once an ATL
transformation is executed, the ATL engine matches the source pattern against
the source model and instantiates each matched pattern into the target
model.} \cite{atl_manual}, a model transformation language framed in
Eclipse. Another language used for model transformation is Acceleo
\cite{acceleo}, a model-to-text engine. $\pi$SOD-M uses this
environment~\footnote{\newText{The environmental requirements are Eclipse
Modelling (Galileo or higher) and all the proposed plugins: the
\textit{$\pi$-UseCase, $\pi$-ServiceProcess, $\pi$-ServiceComposition} and
\textit{$\pi$-PEWS} editors, as well as the transformation plugins. Because
the environment is proposed in the context of Eclipse, the requirements are
otherwise the same as those needed to use Eclipse itself.}} to generate
service composition specification code in \textit{$\pi$-PEWS}.
% then for each matching, the target pattern is instantiated in the target model, replacing the match found in the
% source model.
% Thus, ATL works perfectly with the models defined over EMF, as well as
% $\pi$SOD-M plugin. So one, ATL compiler provides features like multiples source
% patterns, rule inheritance, and endpoint rules.
% After the user model the application and do the
% transformations, as final result, he can have the correspondent code for
% the service application. Thus, the methodology development environment
% is composed of these two platforms (ATL and Acceleo) that assist in the development
% of $\pi$SOD-M based applications.
% It is not necessary the methodology's final user knows Acceleo or ATL
% for modeling an application. These languages have been used only for the
% tools development. The tool's user also does not need have extensive knowledge
% in MDA to use the tool. Knowledge of basic MDA's concepts and what means
% transformation between models are sufficient to adapt and use the tool in a real
% development environment.
The remainder of the chapter is organized as follows. Section
\ref{sec:architecture} describes the environment architecture, including the
definition of the meta-models, the implementation of the model transformation
process and the code generation engine. Section \ref{sec:install} describes
how to install and use the environment. Section
\ref{sec:extendingEnvironment} describes how to extend the environment by
adding new components that generate system specifications in different
languages. Section \ref{sec:env_conclusion} concludes the chapter.
\section{General Architecture}
\label{sec:architecture}
Figure \ref{fig:policymanager} presents the general architecture for the
$\pi$SOD-M environment and details how components interact with each other.
Each model has an editor, and transformation tools that support the application
development.
% This figure
% presents, in general, the same components described in figure
% \ref{fig:environmentComponents}, although emphasizing the context of Eclipse.
%
% The meta-models and their plugins for editing are at the highest
% level. In an intermediate level we have the ATL based transformation
% components for the models mapping, and finally, the code generation is
% accomplished through the mapping described in Acceleo, generating the
% specification to be executed.
\begin{figure} [ht!]
\centering
\includegraphics[width=1.0\textwidth]{chapters/methodology/figs/FrameworkOverviewFigure}
\caption{$\pi$SOD-M Development Environment.}
\label{fig:policymanager}
\end{figure}
The $\pi$SOD-M environment architecture is organized in four layers:
\textit{(i) Meta-model level, (ii) PIM-to-PIM Transformations level, (iii)
PIM-to-PSM Transformations level} and \textit{(iv) Code generation level}.
Each layer comprises a set of components that
together support the $\pi$SOD-M environment.
This figure presents how those components are implemented by our tool: (i) the
\textit{Meta-model component} (figure \ref{fig:environmentComponents})
represents the \textit{Model plugins module} of figure \ref{fig:policymanager} (together with all methodology
meta-models); (ii) the \textit{PIM-to-PIM Transformations and PIM-to-PSM
Transformations components} (figure \ref{fig:environmentComponents}) represent
the \textit{Mapping plugins module} of figure \ref{fig:policymanager};
(iii) the \textit{Code Transformation} component of the figure
\ref{fig:environmentComponents} is implemented by the \textit{Code generation
module} of figure \ref{fig:policymanager}; and (iv) the \textit{Execution
engine} is represented by the lower tier of figure \ref{fig:policymanager}.
% The main components
% are: \textit{(I)} - \textit{$\pi$-UseCase} ecore meta-model,
% \textit{$\pi$-ServiceProcess} ecore meta-model, \textit{$\pi$-ServiceComposition} ecore meta-model and
% \textit{$\pi$-PEWS} ecore meta-model, that are
% sub-components of the \textit{Meta-model component} (\textit{EMF Ecore
% meta-model level}); \textit{(II)} - \textit{$\pi$-UseCase2$\pi$-ServiceProcess} component and
% \textit{$\pi$-ServiceProcess2$\pi$-ServiceComposition} component, that are
% sub-components of the \textit{PIM-to-PIM Transformation component} (\textit{Model transformation
% level}); \textit{(III)} - \textit{$\pi$-ServiceComposition2$\pi$-PEWS}
% component, that are sub-components of the \textit{PIM-to-PSM Transformation
% component}; \textit{(IV)} - the \textit{Code Transformation} component
% (\textit{Code generation level}); and \textit{(V)} - the \textit{Execution Engine} component, comprising both,
% \textit{APolicy Engine} and \textit{PEWS Engine} components.
\begin{figure} [ht!]
\centering
\includegraphics[width=1.0\textwidth]{chapters/implementation/figs/componentsWOE}
\caption{Environment Components.}
\label{fig:environmentComponents}
\end{figure}
The \textit{Models plugin module} comprises the components that describe
the $\pi$SOD-M meta-models and how their models must be created. There are
four meta-model components. All components of the \textit{Mapping plugin
module} depend on the definitions made in the \textit{Models plugin}.
When a model transformation is made, the models must comply with their
respective meta-models. The process is executed as follows: every time a
transformation is performed, a consistency check of both the source and
target models is carried out. After all transformations are made, the PSM
model is translated into code for a particular platform; in the case of
$\pi$SOD-M, \textit{$\pi$-PEWS} is the chosen platform. The transformation of
the PSM model into code is the last transformation stage. The component of
the \textit{Code generation module} depends on the PSM model generated by the
last model transformation. Finally, the \textit{Execution Engine} component
executes the service-based specification code.
% The following three sections will detail each particular feature of the
% $\pi$SOD-M development environment.
\subsection{Ecore Meta-models (\textit{Models Plugin Module})}
The implementation of each meta-model is defined in an \textit{Ecore}\footnote{The
\textit{Models Plugin Module} is a set of Ecore files that represents all
$\pi$SOD-M meta-models. These meta-models are the sources for the development
of each of the methodology's models. All models designed for an application
must obey their meta-model specification. This module is composed of all the
proposed methodology's meta-models in EMF (\textit{.ecore} extension).} file;
the meta-models are: \textit{$\pi$-UseCase, $\pi$-ServiceProcess,
$\pi$-ServiceComposition} and \textit{$\pi$-PEWS}. All meta-models have a
related \textit{genmodel}\footnote{A \textit{.genmodel} is an intermediate
file format used to produce the syntax editor for each meta-model.}
definition. The plugin editor can be created from the \textit{genmodel}
definition, so that models can be specified.
% The Ecore Tools component provides a complete environment to create, edit and
% maintain Ecore models. This component eases handling of Ecore models with a
% Graphical Ecore Editor and bridges to other existing Ecore tools.
% From a model specification, EMF provides tools and runtime support to produce a
% set of classes for the model, a set of adapter classes that enable viewing and
% command-based editing of the model, and a basic editor. Models can be specified
% using annotated UML, XML documents, or modeling tools, then imported into
% EMF. Most important of all, EMF provides the foundation for interoperability
% with other EMF-based tools and applications.
% Although there are editors for each model of the methodology, it is still
% necessary components that can make a transformations between them. Thus the
% editing process need not be complete at all levels, but only at the highest
% level, \textit{i.e.} $\pi$-UseCase model, and then make automatic generation for
% the other models.
%After the creation of each meta-model were created their respective plugins.
Using this set of tools (model editors) it is possible to create models at
the different $\pi$SOD-M levels. There are editors for all $\pi$SOD-M models:
the \textit{$\pi$-UseCase}, \textit{$\pi$-ServiceProcess},
\textit{$\pi$-ServiceComposition} and \textit{$\pi$-PEWS} editors.
Although there is an editor for each methodology model, components are still
needed to perform transformations among them. The model specification process
can thus be carried out at every methodology level; alternatively, it can be
performed only at the highest level, \textit{i.e.} the \textit{$\pi$-UseCase}
model, with automatic transformations then generating the lower-level models.
\subsection{Model Transformation (\textit{Mapping Plugin Module})}
The \textit{Model transformation level} has a set of components for processing
and transforming models. The model transformation components take the source
models and generate the equivalent target models. For example, the
\textit{$\pi$-ServiceProcess} model is generated from a \textit{$\pi$-UseCase}
model, the \textit{$\pi$-ServiceComposition} model from a
\textit{$\pi$-ServiceProcess} model, and finally a \textit{$\pi$-PEWS} model
from a \textit{$\pi$-ServiceComposition} model. After the model generation the
designer can perform refinements, improving and adjusting elements that
require a more detailed description at that modeling level.
% Figure \ref{fig:policymanager} shows the environment components for the
% processing models transformation.
The $\pi$SOD-M model transformation process is based on the rules described in
chapter \ref{chapter:methodology}. The implementation requires additional,
more specific information to be taken into account: we had to consider the
representation of our concepts in the Eclipse and ATL environments for
MDA-based application development, \textit{e.g.}, aspects of plugin
generation, design model properties, compliance of the designed meta-model
with the generated model, the specific model implementation, etc.
% \begin{itemize}
% \item Each meta-model is modeled using the EMF environment. During the
% meta-model definition, a beta plugin version was being generated to verify the
% model development consistence and coherence. For example the $\pi$-UseCase
% meta-model, during its definition, short use cases models examples were
% developed to validate our definition;
% \item A stable plugin version was generated and then a complete example was
% modeled to validate the usability of each $\pi$SOD-M model plugin. The
% sequence of the model plugin development were:
% \textit{$\pi$-ServiceComposition, $\pi$-PEWS, $\pi$-UseCase} and
% \textit{$\pi$-ServiceProcess}. We first generated the low level plugins to
% validate the code generation from a service composition specification.
% Immediately after we developed the high level model to the complete $\pi$SOD-M
% support.
% \item With all models well developed and assembled in the tool for each
% aspect of service model development, it was necessary proceed with the
% automatic transformation. Each transformation represents, from a specific
% $\pi$SOD-M model, that it is possible to generate a equivalent lower level
% $\pi$SOD-M model, \textit{e.g.}, (i) from the $\pi$-UseCase model, the
% $\pi$-ServiceProcess can be generated; (ii) from the $\pi$-ServiceProcess
% model a $\pi$-ServiceComposition can be generated; (iii) from the
% $\pi$-ServiceComposition model, the $\pi$-PEWS model can be generated; and
% (iv) from the $\pi$-PEWS model, the system service composition code can be
% generated.
% \item By the end, we configure a $\pi$SOD-M Eclipse version for research use.
% \end{itemize}
We used the Eclipse Modeling Framework (EMF) to implement the
\textit{$\pi$-UseCase, $\pi$-ServiceProcess, $\pi$-ServiceComposition} and
\textit{$\pi$-PEWS} meta-models. The ATL language was used to develop the
mapping rules for the model transformations (the
\textit{$\pi$-UseCase2$\pi$-ServiceProcess},
\textit{$\pi$-ServiceProcess2$\pi$-ServiceComposition} and
\textit{$\pi$-ServiceComposition2$\pi$-PEWS} plug-ins). The code generation
plug-in, in turn, takes as input a \textit{$\pi$-PEWS} model implementing a
specific service composition and generates the code to be executed by the
{\em Policy} based \textit{Execution Engine} (figure
\ref{fig:environmentComponents}).
\begin{figure} [ht!]
\centering
\includegraphics[width=1.0\textwidth]{chapters/methodology/figs/modelTrasformation}
\caption{ATL Model to Model Transformation in $\pi$SOD-M.}
\label{fig:modelTomodelTransfomation}
\end{figure}
Figures \ref{fig:modelTomodelTransfomation} and
\ref{fig:modelToTextTransformation} present a general view of the $\pi$SOD-M
model transformations, showing the set of plug-ins developed to implement
them. Figure \ref{fig:modelTomodelTransfomation} presents the model-to-model
transformations, while figure \ref{fig:modelToTextTransformation} shows the
model-to-text transformation schema. The environment implements the abstract
architecture shown in figure
\ref{fig:policymanager}. This environment consists of plug-ins implementing the
\textit{$\pi$-UseCase, $\pi$-ServiceProcess, $\pi$-ServiceComposition} and
\textit{$\pi$-PEWS} meta-models used for defining models; and ATL rules for
transforming PIM and PSM models (model to model transformation) and finally
generating code (model to text transformation) with Acceleo.
\begin{figure} [ht!]
\centering
\includegraphics[width=0.7\textwidth]{chapters/methodology/figs/AcceleoTransformationFigure}
\caption{Acceleo Model to Text Transformation in $\pi$SOD-M.}
\label{fig:modelToTextTransformation}
\end{figure}
In order to proceed with a model transformation it is necessary to configure
the transformation environment. Figure \ref{fig:configurationATL} presents
the standard screen for configuring each transformation (in this case for the
``to publish music'' scenario). From any source model, for example
\textit{$\pi$-UseCase, $\pi$-ServiceProcess} or
\textit{$\pi$-ServiceComposition}, the system can perform the automatic
transformation. Using the transformation tool of figure
\ref{fig:configurationATL}, the user must: (1) indicate the ATL
transformation rules file; (2) choose the source meta-model reference
(.ecore file); (3) choose the target meta-model reference (.ecore file); (4)
choose the source model to be transformed (\textit{e.g.} the
\textit{music.piusecase} file); (5) choose the target model to be generated
(\textit{e.g.} the \textit{music.piserviceprocess} file); and finally, (6)
run the tool. The same must be done for all model-to-model transformations.
For the model-to-text transformation, the rule file to be chosen should be
the Acceleo file.
\begin{figure}[ht!]
\centering
\includegraphics[width=.90\textwidth]{chapters/implementation/figs/ATL-trasnformationPiUseCase2PiServiceProcess.pdf}
\caption{ATL Configuration for $\pi$SOD-M Transformation.}
\label{fig:configurationATL}
\end{figure}
As an example, figure \ref{fig:configurationATL} shows the configuration for the
transformation from \textit{$\pi$-UseCase} (source) to
\textit{$\pi$-ServiceProcess} (target). Notice that, in this case, the reference
meta-models must be the same. For further transformations, this process must
follow the same sequence, changing only the models and reference meta-models.
\subsubsection{\textit{$\pi$-UseCase2$\pi$ServiceProcess} Transformation Rules}
The transformation rules describe how models are transformed. As a general
rule, the automatic transformation of models favors faster application
development. In most cases, either the environment or the designer should
verify whether the transformations are valid. In the $\pi$SOD-M environment,
the consistency check of the transformations must be performed by the
designer; if a problem in a transformation is identified, the target model
can be modified manually.
\begin{figure}
\tiny
\centering
\begin{lstlisting}[label=list:exampleRules,caption=ATL Example Rule.]
1 rule ruleName{
2 from
3 var_sourceName: SourceModel!Entity
4 to
5 var_targetName: TargetModel!Entity(
6 atribute_1 <- var_sourceName.atribute_a,
7 atribute_2 <- var_sourceName.atribute_b
8 )
9 }
\end{lstlisting}
\label{fig:exampleRule}
\end{figure}
In ATL there exist two different kinds of rules, corresponding to the two
programming modes provided by ATL (\textit{i.e.} declarative and imperative
programming): the \textit{matched rules}\footnote{The matched rules constitute
the core of an ATL declarative transformation since they make it possible to
specify 1) for which kinds of source elements target elements must be generated,
and 2) the way the generated target elements have to be initialized. A matched
rule is identified by its name. It matches a given type of source model element,
and generates one or more kinds of target model elements. The rule specifies the
way generated target model elements must be initialized from each matched source
model element\cite{atl_manual}.} (declarative programming) and the
\textit{called rules}\footnote{The called rules provide ATL developers with convenient
imperative programming facilities. Called rules can be seen as a particular type
of helpers: they have to be explicitly called to be executed and they can accept
parameters. However, as opposed to helpers, called rules can generate target
model elements as matched rules do. A called rule has to be called from an
imperative code section, either from a match rule or another called
rule\cite{atl_manual}.} (imperative programming) \cite{atl_manual}. For the
$\pi$SOD-M environment, we use both types of rules. Listing
\ref{list:exampleRules} shows a general example of an ATL \textit{matched
rule}; following this structure, we present below the main ATL rules used to
implement the model transformations. As described in listing
\ref{list:exampleRules}, a rule consists of a name (line 1), a source entity
(\textit{from} clause, lines 2-3), and one or more target entities
(\textit{to} clause, lines 4-5). Each entity has a name, such as
\textit{var\_sourceName} and \textit{var\_targetName}. The transformation is
performed attribute by attribute, according to the general rules established
for each case.
% The rules for automatic \textit{$\pi$-UseCase2$\pi$ServiceProcess}
% transformation comply with the description made in section
% \ref{sec:models-tranformation}, and we present some rules in ATL for model
% transformation in $\pi$SOD-M.
\begin{figure}
\tiny
\centering
\begin{lstlisting}[label=list:useCase2Action,caption=ATL -
piUseCase2piServiceProcess : useCase2action Rule. ]
1 rule useCase2action {
2 from usecase : PiUseCase!FunctionalBasicUseCase(
3 usecase.extend->size() >= 2
4 )
5 to sp_action : PiServiceProcess!Action (
6 name <- usecase.name
7 ), sp_serviceActivity : PiServiceProcess!ServiceActivity(
8 action <- sp_action,
9 name <- usecase.name + 'SA',
10 serviceProcess <- thisModule.raiz_piUseCase
11 ), sp_contract: PiServiceProcess!Contract(
12 name <- usecase.name + 'Contract',
13 assertionInContract <- usecase.UCContraint,
14 action <- sp_action,
15 serviceProcess <- thisModule.raiz_piUseCase
16 )
17 }
\end{lstlisting}
%\label{fig:pewscontract}
\end{figure}
\begin{figure}
\tiny
\centering
\begin{lstlisting}[label=list:constraint2contract,caption=ATL -
piUseCase2piServiceProcess : constraint2contract Rule. ]
1 rule constraint2contract{
2 from
3 constraint: PiUseCase!UseCaseConstraint
4 to
5 assertion: PiServiceProcess!Assertion(
6 name <- constraint.name,
7 description <- constraint.description
8 )
9 }
\end{lstlisting}
%\label{fig:pewscontract}
\end{figure}
Listing \ref{list:useCase2Action} shows the ATL transformation rule between a
{\sc Use Case} and an {\sc Action}, while listing \ref{list:constraint2contract}
shows the transformation rule between {\sc Constraint} and {\sc Assertion}.
There is a restriction in the rule presented in listing
\ref{list:useCase2Action} (line 3), which requires that there be at least
two {\sc Extend} relations between use cases. In this case, all use cases are
transformed into actions and grouped into a {\sc Service Activity} (lines
7-10), and all the constraints are transformed into a set of assertions that
are automatically associated with a {\sc Contract} (lines 11-15). This
transformation (listing \ref{list:useCase2Action}) describes that from a {\sc
Use Case} (line 2), {\sc Actions}, a {\sc Service Activity} and a {\sc Contract}
can be generated (lines 5, 7 and 11), depending on the existence of {\sc
Constraint} or dependency elements ({\sc Include} or {\sc Extend}) related to
this use case (line 3).
Listing \ref{list:constraint2contract} describes that every {\sc Constraint}
(\textit{from} clause, lines 2-3) is directly transformed into an
{\sc Assertion} (\textit{to} clause, lines 4-5); there is no restriction in
this clause. The restrictions are treated in separate rules, such as the one
in listing \ref{list:useCase2Action}.
\subsubsection{\textit{$\pi$-ServiceProcess2$\pi$-ServiceComposition}
Transformation Rules}
As the \textit{$\pi$-ServiceComposition} model is a
refinement of some concepts defined in \textit{$\pi$-ServiceProcess}, such as
{\sc Contract} and {\sc Activity Services}, most of the transformations
between these models are direct, without restrictions.
We present the ATL transformation (listing
\ref{list:rooServiceProcess2rootServiceComposition}) of the root element,
which comprises the main elements of both models.
\begin{figure}
\tiny
\centering
\begin{lstlisting}[label=list:rooServiceProcess2rootServiceComposition,caption=ATL -
piServiceProcess2piServiceComposition : root Rule. ]
1 rule root {
2 from
3 root: piServiceProcess!ServiceProcess
4 to
5 root_service: piServiceComposition!CompositionServiceModel(
6 activities <- root.activity,
7 edges <- root.edge,
8 compositionPolices <- root.contract
9 )
10 }
\end{lstlisting}
\label{fig:pewscontract}
\end{figure}
Listing \ref{list:rooServiceProcess2rootServiceComposition} shows the
transformation rule for the main elements of the \textit{$\pi$-ServiceProcess}
model into the \textit{$\pi$-ServiceComposition} model: the {\sc Activities},
their {\sc Edges}, and the {\sc Contracts}, which are transformed into
{\sc Policies} (lines 6-8). The transformation of {\sc Assertions} into policy
{\sc Rules} is described by rules of another type. Further rules state that
all {\sc Contracts} belonging to a specific {\sc Non-functional Requirement}
are grouped together; for example, all contracts concerning performance
restrictions are grouped into a single performance {\sc Policy}. This listing
is thus an ATL example of the transformation of the
\textit{$\pi$-ServiceProcess} model into the \textit{$\pi$-ServiceComposition}
model.
\subsubsection{\textit{$\pi$-ServiceComposition2$\pi$-PEWS} Transformation
Rules}
The \textit{$\pi$-ServiceComposition2$\pi$-PEWS} transformation is the only
one that crosses the PIM and PSM levels; however, there is no difference in
how the rules are described in ATL, since all rules are defined in terms of
meta-models rather than specific models. Thus, the PIM-to-PSM transformation
rules in ATL follow the same syntax and semantics as the PIM-to-PIM
transformation rules.
The model generated by this transformation, namely the \textit{$\pi$-PEWS}
model, will be used as input for the code generation component, so that the
application code specification can be generated. Listings
\ref{list:rootSC2PEWS} and \ref{list:rulePre} present two transformation rules
in ATL. The first describes the transformation of the main elements for the
description of a service composition: the main path, the name of the
specification, the services and the policies (lines 6-9). Listing
\ref{list:rulePre} describes a policy {\sc Rule} and its related concepts,
such as {\sc Action}, {\sc Event} and {\sc Condition}. The ATL rule has a
constraint to be checked (line 4), namely what kind of {\sc Rule} is being
translated into the specific language, because the transformation changes
depending on the type. The {\sc Rule} element (line 15) consists of all the
properties necessary to create a {\sc Pre-condition}, such as {\sc Action},
{\sc Event} and {\sc Condition} (lines 16-18), and indicates to which
{\sc Policy} the {\sc Pre-condition} is related (line 19).
\begin{figure}
\tiny
\centering
\begin{lstlisting}[label=list:rootSC2PEWS,caption=ATL -
piServiceComposition2piPEWS : root Rule. ]
1 rule root{
2 from sCM : pisc!CompositionServiceModel
3 to
4 path: pipews!Path (),
5 pews : pipews!PEWSCTSpec (
6 name <- 'newModelPEWSpecName',
7 has <- path,
8 contains <- sCM.partition,
9 defines <- thisModule.policies
10 )
11 }
\end{lstlisting}
\label{fig:pewscontract}
\end{figure}
\begin{figure}
\tiny
\centering
\begin{lstlisting}[label=list:rulePre,caption=ATL -
piServiceComposition2piPEWS : Pre-condition Rule. ]
1 rule rulePre{
2 from
3 r: pisc!Rule (
4 r.event = #PRE)
5 to
6 rAct: pipews!Action(
7 act <- r.action
8 ),
9 rEvt: pipews!Event(
10 type <- #ActivityPrepered
11 ),
12 rCond: pipews!Condition(
13 expression <- r.condition
14 ),
15 rRule: pipews!Precondition(
16 calls <- rAct,
17 defines <- rCond,
18 hasSome <- rEvt,
19 policy <- r.policy
20 )
21 }
\end{lstlisting}
\label{fig:pewscontract}
\end{figure}
% The next section will show how the code is generated from a \textit{$\pi$-PEWS}
% model.
\subsection{Code Generation (\textit{Code Generation Module})}
}
\newText{
The $\pi$SOD-M methodology and its environment use $\pi$-PEWS\footnote{Other
meta-models and transformations can be defined for supporting code generation
into WS-BPEL, XAML and other languages adapted for describing business/service
processes.} as an intermediate language for supporting the generation of
executable code. $\pi$-PEWS is a behavioural language for defining web service
interfaces: it specifies not only the input/output interface of the methods
exported by a web service, but also the expected behaviour of those methods
(e.g., dependencies, exchanged messages). The $\pi$-PEWS extension was
inspired by languages like JCML \cite{CostaMMN12}, JML \cite{LeavensKP07} and
Eiffel \cite{Meyer92b}, which introduce the notion of contract for specifying
the behaviour of a function. In $\pi$-PEWS this behaviour can be specified by
defining the order in which the methods of services will be executed within a
business process, together with input/output restrictions on those methods.
}
The $\pi$SOD-M architecture's code generation component (\textit{Code
generation level}) is a \textit{$\pi$-PEWS} specification generator. The
code is produced from a \textit{$\pi$-PEWS} model, after the latter has been
generated by a model transformation from the \textit{$\pi$-ServiceComposition}
model. This component was implemented using Acceleo \cite{acceleo}. Figure
\ref{fig:sequenceDiagram} presents a sequence diagram describing the model
transformation process and how the designer interacts with the environment to
specify each $\pi$SOD-M model until the specification code is generated.
\begin{figure}[ht!]
\centering
\includegraphics[width=1.0\textwidth]{chapters/implementation/figs/codeGeneration.pdf}
\caption{Acceleo Specification for \textit{$\pi$-PEWS} Code Generation.}
\label{fig:acceleoCode}
\end{figure}
Figure \ref{fig:acceleoCode} presents the \textit{$\pi$-PEWS} meta-model
developed using EMF and some pieces of the Acceleo specification. This figure
shows the specification for the \textit{Namespace, Operation, Service} and
\textit{Contract} code generation for \textit{$\pi$-PEWS}. After the code
transformation process, a \textit{.pews} file is created. Listings
\ref{list:namespace}, \ref{list:service} and \ref{list:contract} present parts
of the Acceleo code for the \textit{$\pi$-PEWS} code generation. The code is
the same as that presented in figure \ref{fig:acceleoCode}, which shows the
relation between the meta-model concepts and the Acceleo code. The code
generation follows the language syntax described in appendix
\ref{append:pews_language}.
\begin{figure}
\tiny
\centering
\begin{lstlisting}[label=list:namespace,caption=Acceleo - Namespace and
Operation Code Specification.]
1 <%script type="PiPEWSMetamodel.PEWSSpec" name="default"
2 file="<%name%>.pews"%>
3 //-----------------------------------------------------------------
4 //------------ <%name%>.pews Service Specification ----------------
5 //-----------------------------------------------------------------
7 //Namespaces
8 <%for (contains){%>
9 namespace <%name%> = <%WDSLAdress%>
10 <%}%>
12 //Operations
13 <%for (has.eAllContents("Operation")){%>
14 alias <%alias%> = portType/<%name%> in <%isDefinedIn.name%>
15 <%}%>
\end{lstlisting}
\label{fig:pewscontract}
\end{figure}
In listing \ref{list:namespace}, lines 1-2 reference the
meta-model, the root element (\textit{PEWSSpec}), and the
name of the generated file (\textit{<\%name\%>.pews}). Lines 3-5 present the
name of the service specification. Lines 7-10 describe a \textit{for}
iteration over the \textit{contains} relationship between \textit{PEWSSpec} and
\textit{Namespace}, and in lines 12-15 all operations defined in each
\textit{Namespace} are generated. These operations come from the
\textit{isDefinedIn} relationship between \textit{Namespace} and
\textit{Operation} entities.
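As an illustration of the generated output, the fragment below sketches the kind
of \textit{.pews} text these templates would emit; the namespace name, WSDL
address and operation names are invented placeholders rather than values taken
from the case study:
\begin{verbatim}
//Namespaces
namespace musicStore = http://example.org/musicStore?wsdl

//Operations
alias publishMusic = portType/publishMusicOp in musicStore
\end{verbatim}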
\begin{figure}
\tiny
\centering
\begin{lstlisting}[label=list:service,caption=Acceleo - Service Code
Specification.]
1 //Services
2 <%for (has.eAllContents("CompositeOperation")) {%>
3 service <%name%> = <%contains.left.filter("Operation").alias%>
4 <%if ((contains.nameOperator.toString().equalsIgnoreCase("sequence"))) {%>
5 .
6 <%}else if((contains.nameOperator.toString().equalsIgnoreCase("parallel"))){%>
7 ||
8 <%}%>
9 <%contains.right.filter("Operation").alias%>
10 <%}%>
\end{lstlisting}
\label{fig:pewscontract}
\end{figure}
Listing \ref{list:service} presents the Acceleo specification for the
transformation of the \textit{$\pi$-PEWS} model services into code. A service is an alias for
one or more operations. Listing \ref{list:contract} specifies the contract generation, using the
\textit{defines} relationship between \textit{APolicy} and \textit{Rule}. Each
contract has a set of rules for the specification; a sketch of the text produced by
these two templates is shown after listing \ref{list:contract}.
\begin{figure}
\tiny
\centering
\begin{lstlisting}[label=list:contract,caption=Acceleo - PEWS Contract
Code Specification.] <%script type="PEWSMetamodel.PEWSCTSpec"
1 //Contract
2 <%for (defines) {%>
3 defContract <%name%>Contract{
4 isAppliedTo: <%isRelatedWith.filter("Operation").alias%>;
5 <%for (defines.filter("Precondition")) {%>
6 requires: <%defines.expression%>
7 (OnFailureDo: <%calls.act.toString()%>);
8 <%}%>
9 <%for (defines.filter("Postcondition")) {%>
10 ensures: <%defines.expression%>
11 (OnFailureDo: <%calls.act.toString()%>);
12 <%}%>
13 <%for (defines.filter("TimeRestriction")) {%>
14 timeConstraints: <%defines.expression%>
15 <%}%>
16 }
<%}%>
\end{lstlisting}
\label{fig:pewscontract}
\end{figure}
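To make the effect of listings \ref{list:service} and \ref{list:contract}
concrete, the sketch below shows the general shape of the text they would
produce; the service, operation, rule expression and action names are invented
for illustration only:
\begin{verbatim}
//Services
service toPublishMusic = publishMusic . publishTwitter

//Contract
defContract publishPolicyContract{
  isAppliedTo: publishMusic;
  requires: @userLogged == true
    (OnFailureDo: notifyUser);
}
\end{verbatim}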
The generated code can be executed in both the \textit{$\pi$-PEWS}
and the A-Policy engines\footnote{Both engines are currently under development. The
\textit{$\pi$-PEWS} engine is being developed at UFRN and the A-Policy engine is
being developed at Grenoble, France.}. These two engines are not native components
of the $\pi$SOD-M plugin. The environment supports the design process so that
code can be generated in any language: new language editor components, for
languages such as BPEL or XAML, can easily be coupled to the environment. To do
so, it is necessary to add the language meta-model and to define the
transformation process. Thus, from a
\textit{$\pi$-ServiceComposition} model, different models and code can be
generated (not only \textit{$\pi$-PEWS}). This requires only the definition of
equivalent meta-models and the corresponding code transformation rules.
The composition engine manages the life cycle of the composition. Once a
composition instance is activated, the engine schedules the composition
activities according to the composition control flow. Each activity is seen as
a process where the service method call is executed. The execution of an
activity has four states: prepared, started, terminated, and failure. The
execution of the control flow (sequence, and/or split and join) can also be
prepared, started and terminated, or raise a failure.
At execution time, the evaluation of policies done by the {\sc Policy} manager
must be synchronized with the execution of the services composition (i.e., the
execution of an activity or a control flow). Policies associated with a scope are
activated when the execution of their scope starts. A {\sc Policy} has to
be executed only if one or several of its rules are triggered. If several rules
are triggered, the {\em Policy} manager first builds an execution plan that
specifies the order in which such rules will be executed, according to the
strategies defined in the following section.
%Once rules have been executed, the {\em A-policy} finishes its execution and returns to a sleeping state.
If rules belonging to several policies are triggered then policies are also
ordered according to an execution plan. The execution of policies is out of the
scope of this thesis; the interested reader can refer to
\cite{Espinosa-Oviedo2011a} for further details.
\begin{landscape}
\begin{figure}
\includegraphics[width=25cm,height=15cm]{chapters/implementation/figs/sequenceDiagram.pdf}
\caption{Model Transformation Process.}
\label{fig:sequenceDiagram}
\end{figure}
\end{landscape}
% The next section will describe how the creation and generation of each model in
% SOD-M is done, for better understanding the transformations process
% represented in figure \ref{fig:sequenceDiagram} .
\section{Defining Reliable Service Based Applications}
\label{sec:install}
% \footnote{The environment download
% can be performed at http://www3.ifrn.edu.br/\~placido/piSOD-M}
The $\pi$SOD-M environment development
starts with the creation of a project and then the definition of a
\textit{$\pi$-UseCase} model\footnote{To create a model, the user must execute the sequence: \textit{File > New
> Other > EMF Model Wizard}, and choose one of the methodology's models.}, supposing that the business and requirements
specification documents have been previously completed. Figure \ref{fig:screanPiSODM} presents the views provided by the environment:
\textit{Project view, Menu view, Editor view} and \textit{Properties view}.
% As $\pi$SOD-M environment is based on Eclipse, the developer and analyst
% need not struggle to adapt to the modeling environment. Figure
% \ref{fig:screanPiSODM} presents a tool overview, which has four views:
\begin{figure}[ht!]
\includegraphics[width=.99\textwidth]{chapters/implementation/figs/telaPiSOD-M.pdf}
\caption{$\pi$SOD-M Eclipse Plugin Environment.}
\label{fig:screanPiSODM}
\end{figure}
% For editing is important to follow the concepts presented in $\pi$SOD-M
% meta-models, because it must follow rigorously its structure.
\subsection{\textit{$\pi$-UseCase} Model}
The goal of creating a \textit{$\pi$-UseCase} is to represent the functions and
system services described in the requirements specification and
business specification documents, which are the requirements input for this
phase. In accordance with the process described in figure \ref{fig:sequenceDiagram}, the designer receives the
documents as input and creates the \textit{$\pi$-UseCase} model. With this
model created, the transformation component generates the
\textit{$\pi$-ServiceProcess} model as output.
To create the \textit{$\pi$-UseCase} model, it is necessary to
choose the root element (\textit{Model}) as the starting point of modeling,
during the process of creating a ``\textit{new .piusecase file}''
(using the sequence \textit{File > New > Other > EMF Model Wizard}). From
this point on, the model is created as a tree of elements with its specific
references, as shown in Figure \ref{fig:pisodmToolModel}, which shows the model
elements and their equivalents in the graphical $\pi$-UseCase
model. Each element is built in an iterative way and must obey the hierarchy
and its relationships.
Each model element has a number of children or siblings to which it is related;
\textit{e.g.}, a {\sc Use Case} relates to {\sc Constraint}, {\sc Extend} and
{\sc Include} elements, just as {\sc NFRs} relate to {\sc NFAs}.
Figure \ref{fig:pisodmToolModel} shows how to create the model elements for the
``\textit{to publish music}'' scenario.
\begin{figure}[ht!]
\centering
\includegraphics[width=.99\textwidth]{chapters/implementation/figs/create_model-UseCase.pdf}
\caption{$\pi$-UseCase Model Definition in $\pi$SOD-M Eclipse Plugin.}
\label{fig:pisodmToolModel}
\end{figure}
\begin{exampl}
Items 1 and 2 in Figure \ref{fig:pisodmToolModel} show how to create an
{\sc Actor} and a {\sc Requirement}, item 3 presents the \textit{update music}
{\sc Use Case} and how to create a {\sc Constraint} related to this element.
Item 4 makes reference to a {\sc Constraint} and, finally, items 5 and 6 are
equivalent to the {\sc NFR} and {\sc Package} elements, respectively. Each item
in the model refers to an equivalent element in the graphical model.
\end{exampl}
Figure \ref{fig:pisodmToolModelProperties} presents some configuration
properties. After creating an element, it is necessary to set its properties.
All elements have properties that describe their relationships and specification
values. These properties are used to give specification details, and they are
also used by future transformations.
% All properties must be set, except of the
% properties are not used in the specification of one specific application, for
% example, there is no exceptional behaviour for an action. In this case it is not
% necessary to set this property. Another example that is no necessary set a
% element property is an actor or use case are not related with a package.
\begin{exampl}
Item 1 in Figure \ref{fig:pisodmToolModelProperties} shows the {\sc Use Case}
element properties for the ``\textit{to publish music}'' application. After the
creation of a use case it is necessary to set the actor, its \textit{name}, the \textit{requirement} to which the \textit{use case}
belongs, and the respective \textit{package}. Item 2 shows the properties of a {\sc Constraint} element. For a
constraint, its type must be made explicit (\textit{VALUE, BUSINESS} or
\textit{EXCEPTIONALBEHAVIOUR}), together with its \textit{description}, in which all candidate
\textit{variables} are marked with an `\textit{@}', its \textit{name} and the
{\sc Non-Functional Attributes} element this constraint is related to;
\textit{e.g.}, \textit{Authentication} is an attribute of the
{\sc Non-Functional Requirement} \textit{Security}. Items 3 and 4 show the
``\textit{To Publish Music}'' requirement properties and the package
``\textit{app}'', respectively. The properties of each element obey the definitions
described in the \textit{$\pi$-UseCase} meta-model.
\end{exampl}
\begin{figure}[ht!]
\centering
\includegraphics[width=.99\textwidth]{chapters/implementation/figs/model-UseCase.pdf}
\caption{\textit{$\pi$-UseCase} Properties in $\pi$SOD-M Eclipse Plugin.}
\label{fig:pisodmToolModelProperties}
\end{figure}
\subsection{\textit{$\pi$-ServiceProcess} Models}
The goal of creating a \textit{$\pi$-ServiceProcess} model is to represent the
system execution flow. The designer receives the
\textit{$\pi$-UseCase} model as input to generate the
\textit{$\pi$-ServiceProcess} model. After the \textit{$\pi$-ServiceProcess}
model is generated, the designer again calls the model transformation component
to generate the \textit{$\pi$-ServiceComposition} model as output (figure
\ref{fig:sequenceDiagram}).
To create the \textit{$\pi$-ServiceProcess} model, it is necessary to
choose the root object (\textit{Service Process}) as the starting point of
modeling, during the process of creating a ``\textit{new .piserviceprocess file}''
option. From this point on, the model is created as a tree with its specific
references.
% Figure \ref{fig:piserviceProcessToolModel}
% presents the service process model, a graphical model reference and detail of
% how create each model element. It is possible to create the $\pi$-ServiceProcess
% model in isolation, however this model is generated from the previous model
% descriptions.
As this model is a refinement of the concepts described in the previous model,
its information is drawn from the information and properties of the
\textit{$\pi$-UseCase} model, but it focuses on the process workflow
and the application functions.
\begin{exampl}
Figure \ref{fig:piserviceProcessToolModel} shows the complete model of the example scenario and its main components. From
the root element the user can create the following elements: {\sc Service
Activity, Object Flow, Control Flow, Contract} and {\sc Non-Functional
Attribute}. In Figure \ref{fig:piserviceProcessToolModel}, the item marked with 1
highlights the creation of the {\sc Control Flow}. Items 2 and 3 show the elements {\sc Fork
Node} and {\sc Join Node}, respectively, essential for the execution flow
description. Items 4 show the {\sc Action} elements, which describe the
\textit{download music, listen music} and \textit{publish twitter} actions and,
finally, items 5 highlight the {\sc Assertion} elements, which are used to
describe the restrictions over each {\sc Action}.
\end{exampl}
\begin{figure}[ht!]
\centering
\includegraphics[width=.99\textwidth]{chapters/implementation/figs/create_model-ServiceProcess.pdf}
\caption{\textit{$\pi$-ServiceProcess} Model Definition in $\pi$SOD-M Eclipse
Plugin.}
\label{fig:piserviceProcessToolModel}
\end{figure}
The designer must also configure the properties of each
\textit{$\pi$-ServiceProcess} model element to complete it by specifying all
constraints that compose the contracts and the application process. Figure
\ref{fig:piserviceProcessToolModelProperties} presents the properties of the
main model elements for our example scenario. These properties complement the
modeling of the execution flow.
\begin{figure}[ht!]
\centering
\includegraphics[width=.99\textwidth]{chapters/implementation/figs/model-ServiceProcess.pdf}
\caption{\textit{$\pi$-ServiceProcess} Properties in $\pi$SOD-M Eclipse Plugin.}
\label{fig:piserviceProcessToolModelProperties}
\end{figure}
\begin{exampl}
Items 1 in Figure \ref{fig:piserviceProcessToolModelProperties}
describe the properties of a {\sc Fork Node}, which is associated with the {\sc Edge Activities}
({\sc Control Flow} and {\sc Object Flow}) that connect the {\sc Action}
elements. For example, the {\sc Fork Node} \textit{fn\_toPublishMusic} has an
input edge (\textit{cf1}) and three output edges (\textit{cf2, cf3} and
\textit{cf5}). All these elements are {\sc Control Flow} entities (items 3).
Items 2 show the {\sc Action} properties, and items 4 and 5 describe a {\sc
Contract} and its {\sc Assertion} properties, respectively. A {\sc Contract}
has a \textit{name} and the information of an {\sc Action}.
{\sc Assertions} are child elements of a {\sc Contract}, and each {\sc Contract}
can have many {\sc Assertions}. Each {\sc Assertion} has six properties:
(i) \textit{AProperty}, which describes when the runtime verification is executed,
(ii) the \textit{description}, (iii) the \textit{variable name}, (iv and v) the maximum
and minimum values allowed to be checked (\textit{MaxValue} and \textit{MinValue}) and
(vi) the variable \textit{type}. For non-numeric types, that is, types other than,
\textit{e.g.}, \textit{Short Int, Float} and \textit{Double}, only the
\textit{MinValue} property value is considered and \textit{MaxValue} is described
as \textit{null}. An illustrative configuration of these properties is sketched
after this example.
\end{exampl}
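As a purely illustrative sketch, an {\sc Assertion} over a hypothetical
\textit{price} variable could be configured with the six properties as follows
(all values below are invented and do not come from the case study):
\begin{verbatim}
AProperty:   pre
description: price charged for the selected music
variable:    price
MinValue:    0
MaxValue:    50
type:        Float
\end{verbatim}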
\subsection{\textit{$\pi$-ServiceComposition} Models}
The goal of creating a \textit{$\pi$-ServiceComposition} model is to represent
the system service compositions and to reference the external services
that are called by the application. The designer receives the
\textit{$\pi$-ServiceProcess} model as input to generate the
\textit{$\pi$-ServiceComposition} model. After the
\textit{$\pi$-ServiceComposition} model is generated, the designer again calls
the model transformation component to generate the \textit{$\pi$-PEWS} model as
output (figure \ref{fig:sequenceDiagram}). The output model describes the system
specification for a specific platform.
To create the \textit{$\pi$-ServiceComposition} model, it is necessary to choose
the root object (\textit{Composition Service Model}) as the starting point of modeling,
during the process of creating a ``new .piservicecomposition file'' option. From
this point on, the model is created as a tree with its specific references.
Recall that the \textit{$\pi$-ServiceComposition} model is a refinement of the
\textit{$\pi$-ServiceProcess} model: most elements are the same, except for {\sc
Business Collaborator, Policy, Rules} and {\sc Variable}. Thus, we detail only these elements in the
model editor description.
\begin{exampl}
Figure \ref{fig:piserviceCompositionToolModel} shows
the relationship of these elements for our example scenario. Each {\sc Action}
has a specific {\sc Business Collaborator} (item 3); this element expresses the external service
provider, such as Facebook, Twitter or a Bank. Another example of a {\sc Business
Collaborator} definition is a WSDL specification: the WSDL namespaces represent
a kind of {\sc Business Collaborator}. Besides the {\sc Business Collaborator}
element, the main children of the root element (\textit{Composition Service Model})
are {\sc Policy} and {\sc Service Activity} (items 1), and each {\sc Policy}
(items 2) is directly related to {\sc Service Activities}. From a {\sc Policy}
it is possible to create a set of {\sc Rules} (item 4). Figure
\ref{fig:piserviceCompositionToolModel} also presents an equivalent graphical
model of the application. In this figure, the \textit{Application}'s {\sc
Business Collaborator} presents only the main {\sc Actions} (\textit{buy music, listen music, publish
Facebook} and \textit{publish twitter}) that are related to {\sc Service
Activities} that have one {\sc Policy}. The \textit{Application}'s {\sc Business
Collaborator} model is the same as the one presented in the previous section
(the \textit{$\pi$-ServiceProcess} model).
\end{exampl}
\begin{figure}[ht!]
\centering
\includegraphics[width=.99\textwidth]{chapters/implementation/figs/create-model-Serviceconposition.pdf}
\caption{\textit{$\pi$-ServiceComposition} Model Definition in $\pi$SOD-M
Eclipse Plugin.}
\label{fig:piserviceCompositionToolModel}
\end{figure}
The properties of \textit{$\pi$-ServiceComposition} elements are
configured in the same way as in the two previous editors, following the properties
described in the \textit{$\pi$-ServiceComposition} meta-model
(Figure \ref{fig:servicecomposition}). Thus, to configure the properties of this model,
it is only necessary to choose the desired element and to modify its values.
\subsection{\textit{$\pi$-PEWS} Models}
The goal of creating a \textit{$\pi$-PEWS} model is to represent
the application on a specific platform. The designer receives the
\textit{$\pi$-ServiceComposition} model as input to generate the
\textit{$\pi$-PEWS} model. After the
\textit{$\pi$-PEWS} model is generated, the designer again calls
the model transformation component to generate the \textit{$\pi$-PEWS}
specification code as output (figure \ref{fig:sequenceDiagram}). This code will
be executed.
To create the \textit{$\pi$-PEWS} model, it is necessary to choose the root
object (\textit{PEWS Spec}) as the starting point of modeling. From
this point on, the model is created as a tree with its specific references.
The \textit{$\pi$-PEWS} meta-model is a representation of the
\textit{$\pi$-PEWS} language. Each \textit{$\pi$-PEWS} model is a
program/specification written in that language. The elements described in this
model represent constructs of the language's grammar. Each entity in the model
represents a piece of \textit{$\pi$-PEWS} code.
\begin{figure}[ht!]
\centering
\includegraphics[width=.90\textwidth]{chapters/implementation/figs/create_model-PEWS.pdf}
\caption{\textit{$\pi$-PEWS} Model Definition in $\pi$SOD-M Eclipse Plugin.}
\label{fig:piPEWSToolModel}
\end{figure}
Figures \ref{fig:piPEWSToolModel} and \ref{fig:piPEWSToolModelProperties}
show how the elements that compose a \textit{$\pi$-PEWS} model can be specified
in our tool. This model is generated automatically from the
\textit{$\pi$-ServiceComposition} model.
\begin{figure}[ht!]
\centering
\includegraphics[width=.90\textwidth]{chapters/implementation/figs/create_model-PEWSProperties.pdf}
\caption{\textit{$\pi$-PEWS} Model Properties in $\pi$SOD-M Eclipse Plugin.}
\label{fig:piPEWSToolModelProperties}
\end{figure}
\textit{$\pi$-PEWS} is the last model generated before the code generation. At
this stage, it is expected that the designer proceeds with a general (manual)
consistency check of the model. It is
important to remark that the environment alone does not replace the
modeling work required to design and develop service-based applications.
\section{Extending the Environment}
\label{sec:extendingEnvironment}
Both the $\pi$SOD-M environment and the methodology can be extended, in order
to improve the components that describe and implement the methodology.
Extensions can be made at two different levels: (i) adding new models to the
existing infrastructure, and (ii) considering more abstract levels.
The extension may be in terms of language: new meta-models for other
languages can be described and coupled to the environment. The extension process
takes place as follows: meta-models for new languages (such as BPEL, XAML, WSDL
and XLANG) are designed and coupled into the environment architecture.
After creating the desired meta-model, a mapping must be defined. The mapping must
respect the \textit{$\pi$-ServiceComposition} meta-model. It is also necessary
to describe the rules for code generation, using a code generator engine such as
Acceleo; a hedged sketch of such a generation rule is given below.
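For illustration only, a generation rule for a hypothetical BPEL meta-model could
mirror the structure of listing \ref{list:namespace}; the meta-model name
\textit{BPELMetamodel.Process}, its \textit{contains} relationship and the emitted
tags below are invented placeholders and not part of the implemented environment:
\begin{verbatim}
<%script type="BPELMetamodel.Process" name="default"
  file="<%name%>.bpel"%>
<process name="<%name%>">
<%for (contains){%>
  <invoke name="<%name%>"/>
<%}%>
</process>
\end{verbatim}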
% Thus, new
% languages may be added for the environment improvements and for improve the
% methodology flexibility.
Considering the more abstract level of the methodology, new
meta-models can be described. For instance, the computing independent level
(CIM level) of SOD-M may be added to our methodology to describe the
requirements and business restrictions in terms of models.
% \begin{figure}[ht!]
% \centering
% \includegraphics[width=.95\textwidth]{chapters/implementation/figs/componentes.pdf}
% \caption{$\pi$SOD-M Environment Extension Components.}
% \label{fig:extension}
% \end{figure}
%
%
%
%
% Figure \ref{fig:extension} presents how the environment can be extended.
% The components that can be inserted components of CIM-to-PIM transformation
% models (item 1), new languages meta-models description (item 2) and
% components which represent the transformations between the
% \textit{$\pi$-ServiceComposition} model and each language (item 3). Thus, it is
% possible to extend the development environment of SOD-M. The extension must comply with
% the methodology concepts and environment architecture.
\section{Conclusion}
\label{sec:env_conclusion}
This chapter introduced the implementation
of the $\pi$SOD-M methodology environment. We also presented a representation
for our model description. The implementation includes (i) all the meta-models of
$\pi$SOD-M, (ii) editors for each model of the applications being developed,
and (iii) the plugins for the transformations defined by the methodology. An
example was also presented to describe the environment features.
% This graphical representation can be better refined
% in future work, focussing on processing models or on the development of a
% visual tool for processing models and code generation.
% It was
% presented and described the general environment architecture, its properties, language used, its execution environment and how to extends the environment. The
% development environment of $\pi$SOD-M supports the methodology for a better use of their concept.
% Using the proposed graphical nomenclature, we described all the transformation
% rules that were used for the development of the environment's plugin responsible
% for the transformation of all $\pi$SOD-M models. This chapter also presented, in
% detail, all $\pi$SOD-M meta-models and editors, such as how to extend the
% environment and use it. Finally, after the design description, were
% introduced the execution environment example for the development each models in
% $\pi$SOD-M context.
| {
"alphanum_fraction": 0.7710087798,
"avg_line_length": 47.2565622354,
"ext": "tex",
"hexsha": "b5305e99ec459012a2a9bdc07e45971e31dbf8f6",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "06e4ff673766438a1c6c61731036454e2087e240",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mmusicante/PlacidoConfArtigos",
"max_forks_repo_path": "WESOA2013/source/auxiliar/enviroment.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "06e4ff673766438a1c6c61731036454e2087e240",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mmusicante/PlacidoConfArtigos",
"max_issues_repo_path": "WESOA2013/source/auxiliar/enviroment.tex",
"max_line_length": 181,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "06e4ff673766438a1c6c61731036454e2087e240",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mmusicante/PlacidoConfArtigos",
"max_stars_repo_path": "WESOA2013/source/auxiliar/enviroment.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 14147,
"size": 55810
} |
\section{GUI}
The GUI contains following tabs:
\begin{itemize}
\item \textbf{Enrollment} \\
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth]{img/enrollment.png}
\end{figure}
A new user may take his or her first step by clicking the
Enrollment tab. New users can provide personal information
such as name, sex, and age, then upload a personal avatar to
build up their own data. Experienced users can choose from
the user list and update their information.
Next the user needs to provide a piece of utterance for
the enrollment and training process.
There are two ways to enroll a user:
\begin{itemize}
\item \textbf{Enroll by Recording}
Click Record and start talking, then click Stop to stop
and save. There is no limit on the content of the utterance,
but it is highly recommended that the user speaks long enough
to provide sufficient material for the enrollment.
\item \textbf{Enroll from Wav Files}
The user can upload a pre-recorded voice of a speaker (*.wav recommended).
The system accepts the given voice and the enrollment of the speaker is done.
\end{itemize}
The user can train, dump or load his/her voice features after enrollment.
\item \textbf{Recognition of a user} \\
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth]{img/recognition.png}
\end{figure}
An enrolled user presents or records a piece of utterance,
and the system tells who the person is and shows the user's avatar.
Recognition of multiple pre-recorded files can be done as well.
\item \textbf{Conversation Recognition Mode} \\
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth]{img/conversation.png}
\end{figure}
In Conversation Recognition mode, multiple users can have conversations
together near the microphone, following the same recording procedure as above.
The system will continuously collect voice data and determine
who is speaking right now. The current speaker's avatar will show up
on screen; otherwise the name will be shown. The conversation
audio can be downloaded and saved.
There are several ways to visualize the speaker distribution in the
conversation.
\begin{itemize}
\item \textbf{Conversation log}
A detailed log, including start time, stop time,
current speaker of each period is generated.
\item \textbf{Conversation flow graph}
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth]{img/conversationgraph.png}
\end{figure}
A timeline of the conversation will be shown as a number of
talking-clouds joined together, with start time, stop time
and users' avatars labeled. Different users are presented
with different colors. The timeline will flow to the left dynamically
as time elapses. The visualization of the conversation is done
in this way. This functionality is still under development.
\end{itemize}
\end{itemize}
| {
"alphanum_fraction": 0.7079872204,
"avg_line_length": 39.6202531646,
"ext": "tex",
"hexsha": "bd183f3b70cfc635bdfc4365332bd7284fb8bc11",
"lang": "TeX",
"max_forks_count": 315,
"max_forks_repo_forks_event_max_datetime": "2022-03-29T08:13:36.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-01-21T00:06:00.000Z",
"max_forks_repo_head_hexsha": "1f5d150ded23af4c152b8d20f1ab4ecec77b40e1",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "juliia5m/knu_voice",
"max_forks_repo_path": "doc/Final-Report/gui.tex",
"max_issues_count": 91,
"max_issues_repo_head_hexsha": "1f5d150ded23af4c152b8d20f1ab4ecec77b40e1",
"max_issues_repo_issues_event_max_datetime": "2021-05-19T08:51:26.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-03-19T09:25:23.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "juliia5m/knu_voice",
"max_issues_repo_path": "doc/Final-Report/gui.tex",
"max_line_length": 82,
"max_stars_count": 717,
"max_stars_repo_head_hexsha": "1f5d150ded23af4c152b8d20f1ab4ecec77b40e1",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "juliia5m/knu_voice",
"max_stars_repo_path": "doc/Final-Report/gui.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-30T12:45:45.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-03T15:25:46.000Z",
"num_tokens": 727,
"size": 3130
} |
\section{Measurement Model}
The measurement model serves the goal of allowing us to retrieve the equivalent measurement values for a given state. This is necessary for calculating the Innovation, and thus the Kalman Gain, which allows us to update the state in the best way possible by taking both the predictions and the measurements into account. This use implies the necessity of being able to retrieve the measurements from the state, which is why we include the wheel velocities $v_l$ and $v_r$ in the state despite not actually using these values for navigation; without including them directly, it is hard to recover them from the remaining state variables.
The desired behavior is
\begin{equation}
\boldsymbol{z}_{\textrm{equiv}} = \boldsymbol{H} \cdot \boldsymbol{\hat{x}}_{n,n-1}
\end{equation}
In words, we multiply $\boldsymbol{H}$ by the prediction for the current state made in the previous timestep to recover an equivalent measurement. Since our measurement vector is simply a subset of the state, our measurement model is
\begin{equation}
\boldsymbol{H} =
\begin{pmatrix}
1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 1
\end{pmatrix}
\end{equation}
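Written out component-wise, this selection matrix simply copies the measured
entries out of the state prediction; indexing the eight state components
generically as $\hat{x}_{1},\dots,\hat{x}_{8}$, we have
\begin{equation}
\boldsymbol{z}_{\textrm{equiv}} = \boldsymbol{H} \cdot \boldsymbol{\hat{x}}_{n,n-1} =
\begin{pmatrix}
\hat{x}_{1} & \hat{x}_{2} & \hat{x}_{5} & \hat{x}_{6} & \hat{x}_{7} & \hat{x}_{8}
\end{pmatrix}^{T}
\end{equation}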
We can then calculate our innovation,
\begin{equation}
\boldsymbol{y}_{n} = \boldsymbol{z}_{n} - \boldsymbol{z}_{\textrm{equiv}}
\end{equation} | {
"alphanum_fraction": 0.6916943522,
"avg_line_length": 57.8846153846,
"ext": "tex",
"hexsha": "8c4b4144a8e2930c03e066b0f8ff121db9b3c687",
"lang": "TeX",
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2021-08-23T05:03:27.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-06-29T05:21:18.000Z",
"max_forks_repo_head_hexsha": "906e6a4fca22d2b0c06ef1b8a4a3a9df7f1d17dd",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "SoonerRobotics/igvc_software_2022",
"max_forks_repo_path": "docs/IGVC EKF Derivation/sections/measurementmodel.tex",
"max_issues_count": 4,
"max_issues_repo_head_hexsha": "906e6a4fca22d2b0c06ef1b8a4a3a9df7f1d17dd",
"max_issues_repo_issues_event_max_datetime": "2021-07-17T01:02:31.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-09-22T01:53:48.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "SoonerRobotics/igvc_software_2022",
"max_issues_repo_path": "docs/IGVC EKF Derivation/sections/measurementmodel.tex",
"max_line_length": 636,
"max_stars_count": 4,
"max_stars_repo_head_hexsha": "906e6a4fca22d2b0c06ef1b8a4a3a9df7f1d17dd",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "SoonerRobotics/igvc_software_2022",
"max_stars_repo_path": "docs/IGVC EKF Derivation/sections/measurementmodel.tex",
"max_stars_repo_stars_event_max_datetime": "2021-08-13T23:31:07.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-07-07T14:56:56.000Z",
"num_tokens": 461,
"size": 1505
} |
% $Id$ %
\subsection{Jackpot}
\screenshot{plugins/images/ss-jackpot}{Jackpot}{img:Jackpot}
This is a jackpot slot machine game. At the beginning of the game you
have 20\$. Payouts are given when three matching symbols come up.
\begin{btnmap}
\PluginSelect
\opt{HAVEREMOTEKEYMAP}{& \PluginRCSelect}
& Play\\
\PluginCancel
\opt{HAVEREMOTEKEYMAP}{& \PluginRCCancel}
& Exit the game \\
\end{btnmap}
| {
"alphanum_fraction": 0.7018779343,
"avg_line_length": 26.625,
"ext": "tex",
"hexsha": "a7dd83f69955b1a80fd91b81ff63aa4591448989",
"lang": "TeX",
"max_forks_count": 15,
"max_forks_repo_forks_event_max_datetime": "2020-11-04T04:30:22.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-01-21T13:58:13.000Z",
"max_forks_repo_head_hexsha": "a701aefe45f03ca391a8e2f1a6e3da1b8774b2f2",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "Rockbox-Chinese-Community/Rockbox-RCC",
"max_forks_repo_path": "manual/plugins/jackpot.tex",
"max_issues_count": 4,
"max_issues_repo_head_hexsha": "a701aefe45f03ca391a8e2f1a6e3da1b8774b2f2",
"max_issues_repo_issues_event_max_datetime": "2018-05-18T05:33:33.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-07-04T18:15:33.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "Rockbox-Chinese-Community/Rockbox-RCC",
"max_issues_repo_path": "manual/plugins/jackpot.tex",
"max_line_length": 69,
"max_stars_count": 24,
"max_stars_repo_head_hexsha": "a701aefe45f03ca391a8e2f1a6e3da1b8774b2f2",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "Rockbox-Chinese-Community/Rockbox-RCC",
"max_stars_repo_path": "manual/plugins/jackpot.tex",
"max_stars_repo_stars_event_max_datetime": "2022-01-05T14:09:46.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-03-10T08:43:56.000Z",
"num_tokens": 131,
"size": 426
} |
\chapter{What is Lisp?}
In order to understand a programming language you need to know something of
its history, and of the context in which it has developed. So you need to
\begin{wrapfigure}{l}{2.5in}
{\centering
\includegraphics[width=2.4in]{john_mccarthy_by_null0_via_flickr.eps}}
\caption{John McCarthy (photo courtesy null0 on flickr)}
\end{wrapfigure} be aware of other languages it has been in competition with, and trace
influences. In the case of Lisp this leads to a need to make an amazingly
broad survey of the entire history of computers, since Lisp was one of the
very earliest programming languages and its influence since has been huge.
Lisp was invented by John McCarthy in the late 1950's
and was originally envisaged for use in Artificial Intelligence. Indeed the very
term ``Artificial Intelligence'' had been coined by McCarthy at about the
time that his first thoughts the led to Lisp were arising. While AI and
related areas have continued to be important for the language, it has
found a new core area as an embedded scripting language to support
various large programs, including editors, computer aided design packages,
musical notation systems and accounting software! Serious widely used
applications (for instance the Yahoo Store) run in Lisp, but of course
their users just get to see their behaviour not their Lispy internals.
The Wikipedia entry for Lisp gives a list of other programming languages
derived from or influences by Lisp: CLU, Dylan, Falcon, Forth, Haskell,
Io, Ioke, JavaScript, Logo, Lua, Mathematica, MDL, ML, Nu, OPS5, Perl,
Python, Qi, Rebol, Racket, Ruby, Smalltalk and Tcl. Lisp enthusiasts will
then wish to follow the references through to see what Lisp indirectly
influenced. So for instance the languages that Smalltalk has had an
impact on include Objective-C, Self, Java, PHP 5, Logtalk, Dylan,
AppleScript, Lisaac, NewtonScript, Groovy, Scala, Perl 6,
Common Lisp Object System, Fancy and Dart. Obviously many of these names
will not be familiar, either because they are now defunct or because they
are only used in specialist contexts, but there may be enough
easily recognisable languages to show that the Lisp legacy has been
truly broad.
The language design for Lisp dates from 1958 and built on ideas that
John McCarthy had been working on over the previous couple of years.
The first implementation was completed 1959. This
original work was done on an IBM 704 computer.
\begin{figure}
\begin{center}
\includegraphics[width=4.5in]{ibm-704-columbia.eps}
\end{center}
\caption{An IBM 704 computer. This photo is from the Columbia University
Archive.}
\end{figure}
It could execute
up to 40,000 instructions per second. At the time of their introduction
IBM 704 computers had 4K (36-bit) words of memory -- in other words
18 Kbytes of memory. By the time that model was withdrawn, and transistorised
computers took over, the available memory option had grown to 32K words
(144 Kbytes). Most such computers were rented (possibly for ``under \$50000
per month'') but to buy one would have set you back several million (1950s)
dollars. Inflation since the mid 1950s means that any prices from then
need to be multiplied by about 8 to get a realistic figure for today.
% See http://www.columbia.edu/cu/computinghistory/1965.html for
% photos of IBM704 that might be readily licensable.
If I compare the IBM 704 from 1957 with one of the cheapest options
available in 2012\footnote{The Raspberry Pi model B} today's machine
gas not far short of 2000 times as much memory, runs perhaps 15000 times
faster and the cost has gone down by a factor of perhaps half a million!
There are many variants on time-lines for the development of programming
languages. Some people treat the ``idea'' as setting a date that
establishes priority, some look at the initial reasonably complete
implementation, while yet others view publication as the key milestone.
But however you look at things you will find that FORTRAN came first, with
Lisp and Algol almost in a tie for second place and COBOL third among
languages that have in any sense lasted.
These early languages have all obviously had influences on developments
that have followed. However there is scope for disagreement and interpretation
about just where the links have been strong enough to be worth noting. So
what I indicate here is my view, slanted by the fact that I am wishing to
promote Lisp at present.
FORTRAN has as its linguistic successors FORTRAN IV, '66, '77, '90, '95 and
now 200, 2003 and 2008. Each of those dates indicates a formal international
standard, and the language has clearly remained vibrant and supported. However
about the only language that can really be held to have been derived from
or even dramatically influences by FORTRAN was PI/I (now no longer in use). It
could perhaps be said that FORTRAN should take it share of the blame for
BASIC. However even if the FORTRAN language has not proved influential its
implementation has been. In particular there are two crucial legacies from
it. The first is the concept of being able to compile bodies of code
independently and form a library containing code that you can re-use with
your next complete program. The second is the technology for seriously
clever optimising compilers: ideas pioneered in FORTRAN form the basis
of a large proportion of modern compilers.
Algol was originally the International Algorithmic Language. Algol 58 was soon
succeeded by Algol 60, which might reasonably be characterised as the
first ``modern'' programming language. Algol as such has fallen out of
use, but there is a rich stream of languages that are unambiguously
successors. In one direction Pascal took Algol and emphasised a particular
structured style of programming. Pascal has Modula and Delphi among its
more obvious successors. In a parallel set of developments CPL extended
Algol with a view to becoming a universal language capable of supporting
all areas of computation. The CPL project spawned BCPL (a severe simplification
of CPL initially mainly for use in writing the CPL compiler), and this
led via B to the major language C. C++ builds on and extends C, and
Java can be seen as a reaction to C++, and yet beyond that C\# and Scala
are both reactions to Java. All in all Algol has been a truly major
influence on program language syntax, and on the manner in which
variables are declared and the scope within which they may be accessed.
That leaves Lisp. As a language Lisp can be viewed as being
something of a living fossil, inhabiting niches and being used in slightly
specialist areas. Well actually Lisp has always been something of a language
for special purposes! With both Fortran and Algol there were substantial
changes to syntax made between the original languages and the ones
that are their current descendants. Lisp on the other hand still looks
very much as it did. Various dialects and variants have arisen (and in many
cases then vanished) but they all share a truly distinctive syntax where
programs are held together with lots of parentheses. A modern Lisp
is liable to be bigger and meaner and have many more teeth than the
primordial Lisp 1.5, but the family resemblance will be strong. One reason for
this is that Lisp has never had much special syntax for anything -- it
just uses its standard parenthesis-rich notation. This leads to huge
flexibility in that adding new features will not conflict with existing
notation. One way in which this has been described is attributed to
Joel Moses and appears in the archives in a number of forms. There are
also assertions that Moses denies having ever said it -- but that does not
seriously undermine its essential truth:
{\em \begin{quotation}
A language such as APL is like a diamond. It is perfectly symmetric,
and shines brightly. However, if you wish to add a new feature to the
language, the symmetry is smashed, and the diamond cracks and shatters.
Lisp, on the other hand, is a ball of mud. Essentially shapeless,
you can easily add new extensions and ideas, and all you get is a
larger ball of mud ready and able to accept more and more.
Lisp is infinitely extensible: you can add new functions that have
exactly the same importance to the system as the built-in commands,
or even redefine the built-in commands. You can, if you feel that
way inclined, redefine the whole syntax. Imagine the possibilities
if C allowed you to redefine the while loop, or Fortran let you
introduce exactly that form of DO loop that you required
for your application.
\end{quotation}}
The above is supposed to be alluded to in Steele and Gabriel's paper
on Lisp Evolution\cite{Evolution}, but it is not visible in the copies
I have located to date! The version quoted comes via Padget\cite{Padget}.
The key features that make Lisp what it is are
\begin{description}
\item[Primitive syntax:] In Lisp pretty well the only syntax used is
one where the name of a function and the sequence of its arguments are
enclosed in parentheses. Thus $3*x + 5$ is expressed as
{\tx (plus~(times~3~x)~5)}. This scheme favours consistency over
conciseness. When Lisp was first being invented it was imagined that
a more conventional human-friendly notation would be provided, but the
``all brackets'' notation caught on and has remained basically
unchanged.
\item[Lists as data:] The sorts of data you can work with in Lisp include
numbers and strings, but also symbols and lists of items. Lists can be nested
arbitrarily. List structures are used to model records, trees and pretty
well any other sort of data you might find in a more modern language. This
means that to get started with Lisp you only have to think about a few simple
sorts of data. Alan Perlis\cite{perlis} suggested that ``It is better to have 100
functions operate on one data structure than 10 functions on 10
data structures'' and Lisp follows this recommendation.
\item[Code can be treated as data:] It goes beyond coincidence that the syntax
of Lisp uses parentheses and its notation for list data is the same: Lisp code
can be regarded as Lisp data and vice versa. Thus the language makes it
easy for programmers to think about code analysis or synthesis. Modern
languages such as Java have introduces standard ways in which one piece
of code can inspect another (calling if {\em reflection}): Lisp has
always made that not just possible but natural and easy.
\item[Types only matter at run-time:] Lisp will complain if you try to
add a number to a string, or to otherwise breach good taste as regards the
use of its data-types. But this complaint only happens when you run your code.
There is typically no checking made at the time you define functions or
otherwise present your code to the system. Depending on your point of view
this either gives you a lot of flexibility or represents a serious risk of
allowing bugs to remain undetected. If you are using Lisp as the inner basis
for the implementation of a new programming language the flexibility
arguments can really win.
\item[Mostly Functional:] A complete Lisp program is created by defining
a collection of little functions. A tradition and style in Lisp has grown
up where often all of these really behave as functions in the mathematical
sense of that word. They accept arguments and deliver a result, and that
result only depends on the arguments not on any context or history.
This style of programming has been found useful by those who wish to
formalise things to the level of producing mathematical proofs of their
code, but along with the use of recursion it turns out to lead to
nice human-comprehensible ways of expressing the solution to many
programming tasks.
\item[Automatic storage management:] Lisp programs create new lists,
process them and eventually move on such that they do not need that data
any more. It is fundamental to Lisp that the system takes care of recycling
discarded data and hence the programmer never needs to worry about it. This
is an arrangement that has only rather recently found its way into
other widely-used languages.
\item[An extensible language:] When you start up Lisp and define a collection
of new functions they take their place alongside all the existing ones that
were built into the system from the start. If you have a fragment of Lisp
code such as {\tx(obscure\_function\_name~argument\_1)} you are just using
the standard Lisp notation for a function invocation. There is no distinction
at all made as to whether {\tx obscure\_function\_name} was a built-in function
or one you had defined yourself. So as you define new functions you can often
think of what you are doing as extending the language. If you need new
capabilities from the operating system you can often merely implement
a function in Lisp to access them, and again once it is there its status is
just the same as that of all other Lisp functions. Compare this with other
typical programming languages where for instance characters such
as ``{\tx +}'' and words such as ``{\tx while}'' probably have rather
special status.
\end{description}
There are some things that Lisp perhaps does not emphasise. Well on saying
that it will perhaps be safer to say ``Lisp as discussed here does not
emphasise'' since there can be particular Lisp implementations with all sorts
of specialisms! The topics listed here could sometimes be represented
as disadvantages or limitations of Lisp, and so in each case I will
indicate the way in which a dedicated Lisp fanatic turns the point on its head
to reveal a silver lining.
\begin{description}
\item[Human-readable syntax:] There are some who believe that the Lisp notation
is at best antiquated and could reasonably be described as barbaric. Valid
Lisp code can end up looking really bad. For instance and as a very small
example here is a factorial function written first in Lisp and then
in the tidy language SML\cite{SML}.
{\small\begin{verbatim}
(de factorial (x) (cond ((equal x 0) 1) (t (times
x (factorial (difference x 1))))))
\end{verbatim}}
\noindent and then
{\small\begin{verbatim}
fun factorial 0 = 1
| factorial n = n * factorial(n-1);
\end{verbatim}}
The Lisp version is bulkier: it uses long-winded names for
even basic arithmetic operations, and ensuring that you get exactly
the right number of close brackets (six in this case) at the end could
confuse almost anybody.
There are several responses that a Lisp fanatic will give to this. The
first is that Lisp code should be properly laid out, and that a competent
text editor can help with both indentation and bracket matching so that
neither are a real burden on any serious user. There will be shorter
functions to cover some common cases, so the Lisp code should have been
shown as
{\small\begin{verbatim}
(de factorial (x)
(cond
((zerop x) 1)
(t (times x (factorial (sub1 x))))))
\end{verbatim}}
This is still perhaps bulky, but its structure is now easy to see. Indentation
and bracket counting are so easy to mechanise that it is silly to fuss about
them -- but anybody who can not cope with the bracketed notation could layer
a parser on top of Lisp so that their favourite syntax gets mapped onto
Lisp's internal form. Almost any compiler will have an internal form
of parse-trees that is essentially very much like native Lisp code anyway
so this is not a big deal. As a concrete example of a case where this
approach has been taken, the Reduce algebra system\cite{Reduce} has its
own language, known as {\tx rlisp}, used in exactly this way -- here is our
factorial example written in {\tx rlisp}.
{\small\begin{verbatim}
symbolic procedure factorial x;
if x = 0 then 1
else x * factorial(x-1);
\end{verbatim}}
\item[Absolute code portability:] There are many programming languages
where if you write a program you can expect (or at least hope!) it will
run everywhere without modification. Java is a fine example of a language
where this virtue was made a key marketing point. With Lisp there is
an international standard -- Common Lisp\cite{ANSILisp} -- but if
you fetch and install an implementation of Common Lisp you will find that
at some stage you will need to find out about the limitations and the
extensions applicable to the version you selected. The earlier and
perhaps more primitive Standard Lisp\cite{StdLisp1}\cite{StdLisp2} are the
basis for the \vsl system used here. Lisp is used as an embedded extension
language for the widely-used {\tx emacs} editor, and a web-search will
easily reveal a number of other versions with subtly or crudely differing
capabilities.
The Lisp fanatic response to this is that the exact names that a Lisp system
uses for some operation is very much a secondary issue. A few Lisp
definitions can provide whatever {\em name} you need provided that the
underpinning capability is present. So for instance (and as an approximation
to what would really be needed) you could go
{\small\begin{verbatim}
(de + (a b) (plus a b))
\end{verbatim}}
\noindent and subsequently use ``{\tx +}'' rather than ``{\tx plus}'' to
add numbers. If you think of building Lisp code as extending Lisp then
if you start from a slightly different Lisp dialect then you merely
start with a few definitions that are there to provide whatever compatibility
you need. This attitude then reduces the whole discussion to merely a stand-off
between those who believe that a ``proper'' Lisp should have almost every
capability that is in any way supportable by your computer and those who
prefer a smaller simpler system that just provides enough for the application
area that interests them.
\item[Strong data types:] Software Engineers will emphasise the benefits of
strict compile-time type checking as a crucial aid to the construction of
reasonably reliable code. A Lisp fanatic\footnote{There are some languages
that tend to breed or collect fanatics, and Lisp is undoubtedly one such.}
responds by citing the convenience of Lisp for rapid prototyping. They can
point out that Lisp's natural ability to inspect Lisp code (because code and
data share a single representation) leads to ease in creating custom tools to
analyse code, and that at least sometimes that has been deployed to help
with code robustness. And finally they will point to the way in which
all Lisp data has a printed representation, and so tracing and debugging
Lisp code will be so much nicer than the corresponding experience when
using other languages -- so a few extra bugs may not matter that much!
\item[Performance:] Is Lisp a ``fast'' programming language? Well one thing
is unquestionably the case: The \vsl implementation is {\em slow} compared
to most other languages you might use -- but that is a consequence of it
emphasising conciseness over almost everything else. If Lisp code is written
without regard to performance it will probably not run especially fast,
but masters of the language using top-end Lisp implementations can coax
benchmarks in Lisp to beat almost any other language. Looking at the
raw power of even the cheapest computers today it seems reasonable to
accept the Lisp realist's view that most Lisp code will run somewhat slower
than code written in C or Fortran, but that these days it will be truly
rare to find a situation where this matters.
\item[Linguistic correctness:] From the very start Lisp has been a pragmatic
language providing capabilities to let people ``get the job done''. In its
early days it was towards the forefront of adhering to principles of
correctness. One early illustration of this attitude was the Ph.D. work
of Michael Gordon\cite{MJCG} who noted that a relatively obscure
feature ({\tx label}) in Lisp 1.5 had a semantically inconvenient (and
so arguably incorrect) definition. However mainstream Lisps have continued
to include various imperative and destructive capabilities, and to varying
extents have left in ugliness. Scheme\cite{Scheme} is a spin-off language
that made a serious attempt to tidy things up, and Common Lisp insists (for
instance) that interpreted and compiled code should behave in exactly the
same way -- while other systems accept incompatibilities over this and over
the dynamic scoping of variable bindings so as to achieve simpler or
faster implementations.
\item[Object Orientation:] The Common Lisp Object System is a part of ANSI
Common Lisp, however it is rather substantially different from the Object
models present in other programming languages. Lisp fanatics naturally
view it as greatly superior! Perhaps the most specific claim for it will
be that its meta-object protocol provides exceptional flexibility when
developing and then maintaining code. To others its support for multiple
inheritance and the fact that it does not enforce encapsulation mean that
many of the features that could make Object Orientation a foundation for
reliable code are missing from it. So in the spirit of Lisp being a ball
of mud, many versions of it have embraced object orientation, but it still
seems fair to suggest that the core ways of using Lisp are closer to being
Functional than Object Oriented.
\item[Real-time and Windowed:] Some (but not all) implementations of Common Lisp
support the Common Lisp Interface Manager, a window-managing
library that allows programmers to create modern user interfaces. Other systems
may have their own support for events and windows. Again, in the spirit of the
ball of mud, one can never say that Lisp is incapable of anything! However,
historically this is not one of the areas of Lisp's key competence, and
still less one where smooth portability from one Lisp to another can
be guaranteed. In the early years Lisp was heavily used in Artificial
Intelligence work, specifically including the control of robots. For these,
any pause in its working could be extremely visible as a pause or judder
in the robot's movements. This led to severe concern about real-time
responsiveness, and some Lisp implementations put much effort into
achieving it. As computers have become (much) faster most of the worries
on this front have faded, but it is still the case that if you need
strict real-time responses you will need not only a specialist Lisp
implementation but a specialist real-time operating system for it to run
on top of.
\end{description}
Lisp was originally envisaged as a notation for writing programs in the
area of Artificial Intelligence. While its use spread some way
beyond that, it is still the case that some of the most striking
examples of big programs written in Lisp have been to manipulate
algebraic formulae, perform logical reasoning, control autonomous
robots and analyse natural language. Since then it has been applied in
almost all areas. As a final characterisation of its status and importance
consider Greenspun's Tenth Rule of Programming\cite{greenspun}:
{\em \begin{quotation}
Any sufficiently complicated C or Fortran program contains an ad hoc,
informally-specified, bug-ridden, slow implementation of half of Common Lisp.
\end{quotation}}
so understanding Lisp makes sense for when you end up needing to write
or maintain that complicated other program that is notionally written
in a quite different language. And \vsl certainly contains a slow
implementation of at least parts of Common Lisp.
\subsection{Graphing the Results}
\label{graphing}
In \texttt{cplint} on SWISH you can draw graphs
for visualizing the results either with \href{http://www.c3js.org/}{C3.js} or with \href{https://www.r-project.org/}{R}. Similar predicates are available for the two methods.
There are two types
of graphs: those that represent individual probability values with a bar chart and those that
visualize the results of sampling arguments.
\subsubsection{Using C3.js}
You can draw the probability of a query being true and
being false as a bar chart using the predicates
\begin{verbatim}
bar1(+Probability:float,-Chart:dict) is det
bar(+Probability:float,-Chart:dict) is det
bar(+Successes:int,+Failures:int,-Chart:dict) is det
argbar(+Values:list,-Chart:dict) is det
\end{verbatim}
They return a dict for rendering with C3.js as a bar chart:
the first returns a bar chart with
a single bar for the probability, the second a chart with
a bar for the probability and a bar for one minus the probability,
the third a chart with
a bar for the number of successes and a bar for the number of failures, and
the fourth a chart with
a bar for each value, where \verb|Values| is a list of couples \verb|V-N| where
\verb|V| is the value and \verb|N| is the number of samples
returning that value.
To render C3.js charts you have to include
\begin{verbatim}
:- use_rendering(c3).
\end{verbatim}
before \verb|:- pita.|
You can also use the \verb|bar(-Chart:dict)| option of many predicates
as in
\begin{verbatim}
?- prob(heads(coin),biased(coin),P,[bar(Chart)]).
\end{verbatim}
\verb|Chart| will be instantiated with a
chart with
a bar for the probability of \verb|heads(coin)| being true and a bar for the probability of \verb|heads(coin)| being false,
given that \verb|biased(coin)| is true.
Another example is
\begin{verbatim}
?- mc_prob(heads(coin),P,[bar(Chart)]).
\end{verbatim}
that returns a chart representation of the probability.
\begin{verbatim}
?- mc_sample(heads(coin),1000,P,[bar(Chart)]).
\end{verbatim}
returns in \verb|Chart| a diagram with one bar for the number of successes and
one bar for the number of failures.
The options of
\verb|mc_sample_arg/5|, \verb|mc_sample_arg_first/5|, \verb|mc_mh_sample_arg/6|, \verb|mc_rejection_sample_arg/6|,
can be used for visualizing the results of sampling arguments.
An example is
\begin{verbatim}
?- mc_sample_arg(reach(s0,0,S),50,S,ValList,[bar(Chart)]).
\end{verbatim}
from \href{http://cplint.eu/example/inference/markov_chain.pl}{\texttt{markov\_chain.pl}}.
The same result can be achieved with
\begin{verbatim}
?- mc_sample_arg(reach(s0,0,S),50,S,ValList),argbar(ValList,Chart).
\end{verbatim}
Drawing a graph is particularly interesting when
sampling values for continuous arguments of goals.
In this case, you can use the samples to draw the
probability density function of the argument.
The predicate
\begin{verbatim}
histogram(+List:list,-Chart:dict,+Options:list) is det
\end{verbatim}
draws a histogram of the samples in \verb|List| that must be a list of couples of the form \verb|[V]-W| or \verb|V-W|
where \verb|V| is a sampled value and \verb|W| is its weight. This is the format of the list of samples returned by argument sampling predicates.
The predicate
\begin{verbatim}
density(+List:list,-Chart:dict,+Options:list) is det
\end{verbatim}
draws a line chart of the density of the samples in \verb|List| that must take the same form as for \verb|histogram/3|.
In \verb|histogram/3| and \verb|density/3|, \verb|Options| is a list of options; the following are recognised: \begin{itemize}
\item \verb|min(+Min:float)|
the minimum value of the domain; the default value is the minimum in \verb|List|
\item \verb|max(+Max:float)|
the maximum value of the domain; the default value is the maximum in \verb|List|
\item \verb|nbins(+NBins:int)|
the number of bins for dividing the domain; the default value is 40
\end{itemize}
In this way you can specify the limits and the number of intervals of the $X$ axis.
The predicate
\begin{verbatim}
densities(+PriorList:list,+PostList:list,-Chart:dict,
+Options:list) is det
\end{verbatim}
draws a line chart of the density of two sets of samples, usually
prior and post observations. The samples in \verb|PriorList| and \verb|PostList|
can be either couples \verb|[V]-W| or \verb|V-W| where \verb|V| is a value and \verb|W| its weight.
The same options as for \verb|histogram/3| and \verb|density/3| are recognized.
For example, the query
\begin{verbatim}
?- mc_sample_arg(value(0,X),1000,X,L0,[]),
histogram(L0,Chart,[]).
\end{verbatim}
from \href{http://cplint.eu/example/inference/gauss_mean_est.pl}{\texttt{gauss\_mean\_est.pl}},
takes 1000 samples of argument \verb|X| of \verb|value(0,X)| and draws the density of the samples using a histogram.
Instead
\begin{verbatim}
?- mc_sample_arg(value(0,Y),1000,Y,L0,[]),
mc_lw_sample_arg(value(0,X),
(value(1,9),value(2,8)),1000,X,L),
densities(L0,L,Chart).
\end{verbatim}
from \href{http://cplint.eu/example/inference/gauss_mean_est.pl}{\texttt{gauss\_mean\_est.pl}}
takes 1000 samples of argument \verb|X| of \verb|value(0,X)| before and after observing
\verb|(value(1,9),value(2,8))| and draws the prior and posterior densities of the samples using a line chart.
Predicates \verb|histogram/3|, \verb|density/3| and \verb|densities/4| each have a version with one
argument fewer that is equivalent to the predicate called with an empty option list.
\subsubsection{Using R}
You have to load library \texttt{cplint\_r} (a SWI-Prolog pack) with
\begin{verbatim}
:- use_module(library(cplint_r)).
\end{verbatim}
Then you can use predicates
\begin{verbatim}
bar_r/1
bar_r/2
argbar_r/1
\end{verbatim}
that work like their C3.js counterparts but do not return the graph as an argument, as the graph is
printed with a different mechanism.
You also have
\begin{verbatim}
histogram_r(+List:list,+Options:list) is det
\end{verbatim}
that works as \texttt{histogram/3}.
\begin{verbatim}
density_r(+List:list) is det
\end{verbatim}
is like \texttt{density/3} with the number of bins determined
by R.
\begin{verbatim}
densities_r(+PriorList:list,+PostList:list) is det
\end{verbatim}
is like \texttt{densities/3} with the number of bins determined
by R.
See \href{http://cplint.eu/example/inference/gauss_mean_est_R.pl}{\texttt{gauss\_mean\_est\_R.pl}} for an example of use of these predicates.
\section{Symbiosis}\label{sec:symbiosis-soruce-code}
\begin{Listing}[H]
\begin{lstlisting}[xleftmargin=3em]
<!DOCTYPE html>
<html lang="en">
<head>
<title>Symbiosis</title>
<style>
video {
height: 100vh;
width: 100vw;
object-fit: cover;
}
button {
width: 100px;
height: 50px;
background: red;
position: absolute;
color: white;
left: calc(50vw - 25px);
bottom: 50px;
}
</style>
</head>
<body>
<video></video>
<button>Record</button>
</body>
</html>
\end{lstlisting}
\caption{symbiosis.html}
\label{lst:symbiosis-html}
\end{Listing}
\begin{Listing}[H]
\begin{lstlisting}[xleftmargin=3em]
import {Mitosis} from 'mitosis';
const videoEl = document.querySelector('video');
const recordEl = document.querySelector('button');
const mitosis = new Mitosis();
const onStreamAdded = (streamEv) => {
if (streamEv.type === 'added') {
videoEl.srcObject = streamEv.stream;
videoEl.play();
}
};
const startStream = () => {
navigator.mediaDevices.getUserMedia({
video: true,
audio: false
}).then(
(stream) => {
mitosis.getStreamManager().setLocalStream(stream);
});
};
mitosis
.getStreamManager()
.observeChannelChurn()
.subscribe(
channelEv => channelEv.value
.observeStreamChurn()
.subscribe(onStreamAdded)
);
recordEl.addEventListener('click', startStream);
\end{lstlisting}
\caption{symbiosis.js}
\label{lst:symbiosis-js}
\end{Listing}
\subsubsection{\stid{3.06} PETSc-TAO} \label{subsubsect:petsc}
\paragraph{Overview}
Algebraic solvers (generally nonlinear solvers that use sparse linear solvers) and integrators form the
core computation of many numerical simulations. No scalable ``black box'' sparse solvers or integrators
work for all applications, nor are there single implementations that work well for all problem sizes.
Hence, algebraic solver and integrator packages provide a wide variety of algorithms and implementations
that can be customized for the application and range of problem sizes. PETSc/TAO~\cite{petsc:homepage,petsc-man}
is a widely used numerical library for the scalable solution of linear, nonlinear, and variational systems,
for integration of ODE/DAE systems and computation of their adjoints, and for numerical optimization.
This project focuses on three topics: (1) partially matrix-free scalable solvers to efficiently use
many-core and GPU-based systems; (2) reduced synchronization algorithms that can scale to larger
concurrency than solvers with synchronization points; and (3) performance and data structure
optimizations for all the core data structures to better utilize many-core and GPU-based
systems as well as provide scalability to the exascale systems.
The availability of systems with over 100 times the processing power of today's machines compels the utilization
of these systems not just for a single ``forward solve'' (as discussed above), but rather within a tight loop
of optimization, sensitivity analysis (SA), and uncertainty quantification (UQ). This requires the implementation
of a new scalable library for managing a dynamic hierarchical collection of running scalable simulations, where
the simulations directly feed results into the optimization, SA, and UQ solvers. This library, which we call
libEnsemble, directs the multiple concurrent ``function evaluations'' through the tight coupling and
feedback. This work consists of two parts: (1) the development of libEnsemble; and (2) the development
of application-relevant algorithms to utilize libEnsemble.
\paragraph{Key Challenges}
A key challenge for scaling the PETSc/TAO numerical libraries to Exascale systems is that traditional
``sparse-matrix-based'' techniques for linear, nonlinear, and ODE solvers, as well as optimization
algorithms, are memory-bandwidth limited. Another difficulty is that any synchronizations
required across all compute units---for example, an inner product or a norm---can
dramatically affect the scaling of the solvers. Another challenge is the need to
support the variety of accelerators that will be available on the exascale systems
and the programming models that application teams use for performance
portability.
Running an ensemble of simulations requires a coordination layer that handles load balancing and
allows the collection of running simulations to grow and shrink based on feedback. Thus, our
libEnsemble library must be able to dynamically start simulations with different parameters,
resume simulations to obtain more accurate results, prune running simulations that the solvers
determine can no longer provide useful information, monitor the progress of the simulations,
stop failed or hung simulations, and collect data from the individual simulations both
while they are running and at the end.
\paragraph{Solution Strategy}
To address the scalability of the numerical libraries, we implemented new solvers and data
structures including: pipeline Krylov methods that delay the use of the results of inner
products and norms, allowing overlapping of the reductions and other computation; partially
matrix-free solvers using high-order methods that have high floating-point-to-memory-access
ratios and good potential to use many-core and GPU-based systems; and in-node optimizations
of sparse matrix-matrix products needed by algebraic multigrid to better utilize many-core
systems.
Our strategy for coordinating ensemble computations has been to develop libEnsemble
to satisfy our needs. This library should not be confused with workflow-based
scripting systems; rather it is a library that, through the tight coupling and
feedback, directs the multiple concurrent ``function evaluations'' needed by
optimization, SA, and UQ solvers.
\paragraph{Accelerator Strategy}
Our overall strategy for accelerator support in PETSc/TAO is based on flexibility and a separation
of concerns: we wrap the data pointers from the user's programming language and programming
model inside PETSc Vector and Matrix objects. This approach allows us to focus
our effort on the kernels, such as vector, matrix-vector, matrix-matrix, and other fused
computational kernels, while the developer can focus on their application. We provide
multiple backends and support AMD, Intel, and NVIDIA accelerators. Thus, we support
the tools that the application developers are using, while obtaining performance in
the numerical libraries and kernels. The architecture can be found in
Figure~\ref{fig:petsc-tao-fig}.
\begin{figure}
\centering
\includegraphics[trim = 0in .2in 1.7in .2in, clip, width=0.9\textwidth]{projects/2.3.3-MathLibs/2.3.3.06-PETSc-TAO/petsc_arch}
\caption{The PETSc/TAO architecture enables users to utilize a variety of programming
models for GPUs independently of PETSc's internal programming model.}
\label{fig:petsc-tao-fig}
\end{figure}
\begin{figure}
\centering
\includegraphics[trim = 1in 3.6in 1in 2in, clip, width=0.9\textwidth]{projects/2.3.3-MathLibs/2.3.3.06-PETSc-TAO/petsc_perf}
\caption{Solve time for a 3D Laplacian with second-order elements. Larger grids are generated by uniform refinement. Runs are configured with six resource sets on each Summit node, each with one GPU and 4 MPI processes.}
\label{fig:petsc-tao-perf}
\end{figure}
Our primary performance portability layer is based on Kokkos and KokkosKernels, which support
vector, matrix, and matrix-matrix operations. We also have full support for a native CUDA
backend and partial support for native HIP and OpenCL backends. Figure~\ref{fig:petsc-tao-perf}
demonstrates performance portability via a scaling study with PETSc/TAO's built-in algebraic multigrid
(AMG) solver, PCGAMG, using cuSPARSE and Kokkos (with KokkosKernels) back-ends on our most
mature device, CUDA, where we obtain competitive performance. The slower performance of
KokkosKernels is due to computing a transpose for matrix transpose multiply, which is
not yet natively supported in KokkosKernels.
The PETSc/TAO library has been compiled for Spock and Crusher and preliminary results have
been obtained using the accelerators, and performance optimizations are being made.
The libEnsemble tools are based on Python and make calls to user-defined functions.
The user-defined functions can use the accelerators on the compute nodes during their
evaluation.
\paragraph{Recent Progress}
In the past year, we have released PETSc/TAO 3.16 (available at \url{http://www.mcs.anl.gov/petsc}), which
features enhanced GPU support. We have full support for a Kokkos plus KokkosKernels backend for
performance portability that includes vector, matrix, and matrix-matrix operations and full
support for a native CUDA backend. We have partial support for native HIP and OpenCL
backends. We have been updating and profiling the GAMG solver, the native algebraic
multigrid solver in PETSc, to use the enhanced GPU support. Numerical results on
this work are available in \cite{mills2021toward}. We have also been updating
our scalable communication layer, PetscSF, which allows us to use GPU-aware
MPI, thus allowing direct communication of data between Summit GPUs, bypassing
the previously needed step of first copying the data to the CPU memory. Numerical
results on this work are available in \cite{zhang2021petscsf}.
We have also released libEnsemble 0.7.2 (available at \url{https://github.com/Libensemble/libensemble}).
This release includes new generator functions and examples, improved testing across available platforms,
and a new tutorial. A paper documenting libEnsemble and providing use cases is available
in \cite{hudson2021libensemble}.
\paragraph{Next Steps}
Our next efforts are:
\begin{enumerate}
\item \textbf{Application readiness}:
We will complete a status review of our applications for the early access hardware. We will
complete software quality initiatives related to the build system, architecture specific spack
recipes, and spack smoke tests for build and accelerator usage validation.
\item \textbf{PETSc/TAO + Kokkos + KokkosKernels release}:
We will release a version of PETSc/TAO with full Kokkos and KokkosKernels integration. We will
provide a tutorial on these features, characterize the performance, and suggest optimizations
and best practices.
We will also make improvements to the PETSc/TAO GAMG solver and provide updated performance results.
\item \textbf{libEnsemble + Balsam2 release}:
We will release a version of libEnsemble with full Balsam2 support and updated API.
We will follow continuous integration best practices and continue testing on pre-exascale DOE systems.
\item \textbf{PETSc/TAO + Optimized communication layer release}:
We will release a version of PETSc/TAO with an optimized communication layer for the early access systems.
This version will include support for AMGx on NVIDIA GPUs and a batch LU factorization and solver.
We will optimize some numerical methods (such as methods in DMNetwork and numerical optimization methods)
to use the communication layer and better utilize the accelerators.
We will provide initial benchmark results for GAMG and AMGx.
\end{enumerate}
\chapter{Animation}
{\Rayshade}\ provides basic animation support by
allowing time-varying
transformations to be associated with primitives and aggregate objects.
Commands are provided for controlling the amount of time between each
frame, the speed of the camera shutter, and the total number of frames
to be rendered.
By default, {\rayshade} renders a single frame, with the shutter open for
an instant (0 units of time, in fact). The shutter speed in no way
changes the light-gathering properties of the camera, i.e., frames
rendered using
a longer exposure will not appear brighter than those with a shorter
exposure. The only change will be in the potential amount of movement
that the
frame ``sees'' during the time that the shutter is open.
Each ray cast by {\rayshade} samples a particular moment in time.
The time value assigned to a ray
ranges from the starting time of the current frame to the starting
time plus the amount of time the shutter is open. When
a ray encounters an object or texture that possesses an animated
transformation, the transformed
entity is moved into whatever position is appropriate
for the ray's current time value before intersection, shading, or texturing
computations are performed.
The starting time of the current frame is computed using the
length of each frame,
the current frame number, and the starting time of the first frame.
\begin{defkey}{shutter}{{\em t}}
Specifies that the shutter is open for {\em t} units of
time for each exposure.
\end{defkey}
A larger value of {\em t} will lead to more motion blur in the final
image. Note that {\em t} may be greater than the actual length
of a frame. By default, {\em t} is zero, which prevents all motion blur.
\begin{defkey}{framelength}{{\em frameinc}}
Specifies the time increment between frames.
\end{defkey}
The default time between frames is 1 unit.
\begin{defkey}{starttime}{{\em time}}
Specifies the starting time of the first frame.
\end{defkey}
By default, {\em time} is zero.
Variables may be defined through the use of the {\tt define} keyword:
\begin{defkey}{define}{{\em name value}}
Associate {\em name} with the given {\em value}. Value may
be a constant or a parenthesized expression.
\end{defkey}
The variable {\em name} may thereafter be used in expressions in the
input file.
An animated transformation is one for which animated expressions have
been used to define one or more of its parameters (e.g. the angle through
which a rotation occurs). An animated expression is one that makes
use of a time-varying (``animated'') variable or function.
There are two supported animated variables.
The first, {\tt time}, is equal to the current time.
When a ray encounters an animated
transformation defined using an expression containing {\tt time}, the ray
substitutes its time value into the expression before evaluation.
Using the {\tt time} variable in an animated expression is the most
basic way to create blur-causing motion.
The second animated variable, {\tt frame}, is equal to the current
frame number. Unlike the {\tt time} variable, {\tt frame} takes on
a single value for the duration of each frame. Thus, transforms
animated through the use of the {\tt frame} variable will not exhibit
motion blurring.
Also supported is the {\tt linear} function. This function uses
{\tt time} implicitly to interpolate between two values.
\begin{defkey}{linear}{{\tt (} {\em Stime, Sval, Etime, Eval} {\tt )}}
Linearly interpolate between {\em Sval} at time
{\em Stime} and {\em Eval} at time {\em Etime}.
If the current time is less than {\em Stime}, the function
returns {\em Sval}. If the current time is greater than
{\em Etime}, {\em Eval} is returned.
\end{defkey}
The following example shows the use of the {\tt time} variable to
animate a sphere by translating it downwards over five frames.
Note that the {\tt shutter} keyword is used to set the shutter duration
in order to induce motion blurring.
\begin{verbatim}
frames 5
shutter 1
sphere 1 0 0 2 translate 0 0 (-time)
\end{verbatim}
Further examples of animation may be found in the Examples directory
of the {\rayshade} distribution.
\documentclass{manual}
\usepackage{distutils}
% $Id: dist.tex 67731 2008-12-13 14:07:35Z martin.v.loewis $
% TODO
% Document extension.read_setup_file
% Document build_clib command
%
\title{Distributing Python Modules}
\input{boilerplate}
\author{Greg Ward\\
Anthony Baxter}
\authoraddress{
\strong{Python Software Foundation}\\
Email: \email{[email protected]}
}
\makeindex
\makemodindex
\begin{document}
\maketitle
\input{copyright}
\begin{abstract}
\noindent
This document describes the Python Distribution Utilities
(``Distutils'') from the module developer's point of view, describing
how to use the Distutils to make Python modules and extensions easily
available to a wider audience with very little overhead for
build/release/install mechanics.
\end{abstract}
% The ugly "%begin{latexonly}" pseudo-environment suppresses the table
% of contents for HTML generation.
%
%begin{latexonly}
\tableofcontents
%end{latexonly}
\chapter{An Introduction to Distutils}
\label{intro}
This document covers using the Distutils to distribute your Python
modules, concentrating on the role of developer/distributor: if
you're looking for information on installing Python modules, you
should refer to the \citetitle[../inst/inst.html]{Installing Python
Modules} manual.
\section{Concepts \& Terminology}
\label{concepts}
Using the Distutils is quite simple, both for module developers and for
users/administrators installing third-party modules. As a developer,
your responsibilities (apart from writing solid, well-documented and
well-tested code, of course!) are:
\begin{itemize}
\item write a setup script (\file{setup.py} by convention)
\item (optional) write a setup configuration file
\item create a source distribution
\item (optional) create one or more built (binary) distributions
\end{itemize}
Each of these tasks is covered in this document.
Not all module developers have access to a multitude of platforms, so
it's not always feasible to expect them to create a multitude of built
distributions. It is hoped that a class of intermediaries, called
\emph{packagers}, will arise to address this need. Packagers will take
source distributions released by module developers, build them on one or
more platforms, and release the resulting built distributions. Thus,
users on the most popular platforms will be able to install most popular
Python module distributions in the most natural way for their platform,
without having to run a single setup script or compile a line of code.
\section{A Simple Example}
\label{simple-example}
The setup script is usually quite simple, although since it's written
in Python, there are no arbitrary limits to what you can do with it.
You should, however, be careful about putting arbitrarily expensive
operations in your setup script: unlike, say, Autoconf-style configure
scripts, the setup script may be run multiple times in the course of
building and installing your module distribution.
If all you want to do is distribute a module called \module{foo},
contained in a file \file{foo.py}, then your setup script can be as
simple as this:
\begin{verbatim}
from distutils.core import setup
setup(name='foo',
version='1.0',
py_modules=['foo'],
)
\end{verbatim}
Some observations:
\begin{itemize}
\item most information that you supply to the Distutils is supplied as
keyword arguments to the \function{setup()} function
\item those keyword arguments fall into two categories: package
metadata (name, version number) and information about what's in the
package (a list of pure Python modules, in this case)
\item modules are specified by module name, not filename (the same will
hold true for packages and extensions)
\item it's recommended that you supply a little more metadata, in
particular your name, email address and a URL for the project
(see section~\ref{setup-script} for an example)
\end{itemize}
To create a source distribution for this module, you would create a
setup script, \file{setup.py}, containing the above code, and run:
\begin{verbatim}
python setup.py sdist
\end{verbatim}
which will create an archive file (e.g., tarball on \UNIX, ZIP file on
Windows) containing your setup script \file{setup.py}, and your module
\file{foo.py}. The archive file will be named \file{foo-1.0.tar.gz} (or
\file{.zip}), and will unpack into a directory \file{foo-1.0}.
If an end-user wishes to install your \module{foo} module, all she has
to do is download \file{foo-1.0.tar.gz} (or \file{.zip}), unpack it,
and---from the \file{foo-1.0} directory---run
\begin{verbatim}
python setup.py install
\end{verbatim}
which will ultimately copy \file{foo.py} to the appropriate directory
for third-party modules in their Python installation.
This simple example demonstrates some fundamental concepts of the
Distutils. First, both developers and installers have the same basic
user interface, i.e. the setup script. The difference is which
Distutils \emph{commands} they use: the \command{sdist} command is
almost exclusively for module developers, while \command{install} is
more often for installers (although most developers will want to install
their own code occasionally).
If you want to make things really easy for your users, you can create
one or more built distributions for them. For instance, if you are
running on a Windows machine, and want to make things easy for other
Windows users, you can create an executable installer (the most
appropriate type of built distribution for this platform) with the
\command{bdist\_wininst} command. For example:
\begin{verbatim}
python setup.py bdist_wininst
\end{verbatim}
will create an executable installer, \file{foo-1.0.win32.exe}, in the
current directory.
Other useful built distribution formats are RPM, implemented by the
\command{bdist\_rpm} command, Solaris \program{pkgtool}
(\command{bdist\_pkgtool}), and HP-UX \program{swinstall}
(\command{bdist\_sdux}). For example, the following command will
create an RPM file called \file{foo-1.0.noarch.rpm}:
\begin{verbatim}
python setup.py bdist_rpm
\end{verbatim}
(The \command{bdist\_rpm} command uses the \command{rpm} executable,
therefore this has to be run on an RPM-based system such as Red Hat
Linux, SuSE Linux, or Mandrake Linux.)
You can find out what distribution formats are available at any time by
running
\begin{verbatim}
python setup.py bdist --help-formats
\end{verbatim}
\section{General Python terminology}
\label{python-terms}
If you're reading this document, you probably have a good idea of what
modules, extensions, and so forth are. Nevertheless, just to be sure
that everyone is operating from a common starting point, we offer the
following glossary of common Python terms:
\begin{description}
\item[module] the basic unit of code reusability in Python: a block of
code imported by some other code. Three types of modules concern us
here: pure Python modules, extension modules, and packages.
\item[pure Python module] a module written in Python and contained in a
single \file{.py} file (and possibly associated \file{.pyc} and/or
\file{.pyo} files). Sometimes referred to as a ``pure module.''
\item[extension module] a module written in the low-level language of
the Python implementation: C/\Cpp{} for Python, Java for Jython.
Typically contained in a single dynamically loadable pre-compiled
file, e.g. a shared object (\file{.so}) file for Python extensions on
\UNIX, a DLL (given the \file{.pyd} extension) for Python extensions
on Windows, or a Java class file for Jython extensions. (Note that
currently, the Distutils only handles C/\Cpp{} extensions for Python.)
\item[package] a module that contains other modules; typically contained
in a directory in the filesystem and distinguished from other
directories by the presence of a file \file{\_\_init\_\_.py}.
\item[root package] the root of the hierarchy of packages. (This isn't
really a package, since it doesn't have an \file{\_\_init\_\_.py}
file. But we have to call it something.) The vast majority of the
standard library is in the root package, as are many small, standalone
third-party modules that don't belong to a larger module collection.
Unlike regular packages, modules in the root package can be found in
many directories: in fact, every directory listed in \code{sys.path}
contributes modules to the root package.
\end{description}
\section{Distutils-specific terminology}
\label{distutils-term}
The following terms apply more specifically to the domain of
distributing Python modules using the Distutils:
\begin{description}
\item[module distribution] a collection of Python modules distributed
together as a single downloadable resource and meant to be installed
\emph{en masse}. Examples of some well-known module distributions are
Numeric Python, PyXML, PIL (the Python Imaging Library), or
mxBase. (This would be called a \emph{package}, except that term
is already taken in the Python context: a single module distribution
may contain zero, one, or many Python packages.)
\item[pure module distribution] a module distribution that contains only
pure Python modules and packages. Sometimes referred to as a ``pure
distribution.''
\item[non-pure module distribution] a module distribution that contains
at least one extension module. Sometimes referred to as a ``non-pure
distribution.''
\item[distribution root] the top-level directory of your source tree (or
source distribution); the directory where \file{setup.py} exists. Generally
\file{setup.py} will be run from this directory.
\end{description}
\chapter{Writing the Setup Script}
\label{setup-script}
The setup script is the centre of all activity in building,
distributing, and installing modules using the Distutils. The main
purpose of the setup script is to describe your module distribution to
the Distutils, so that the various commands that operate on your modules
do the right thing. As we saw in section~\ref{simple-example} above,
the setup script consists mainly of a call to \function{setup()}, and
most information supplied to the Distutils by the module developer is
supplied as keyword arguments to \function{setup()}.
Here's a slightly more involved example, which we'll follow for the next
couple of sections: the Distutils' own setup script. (Keep in mind that
although the Distutils are included with Python 1.6 and later, they also
have an independent existence so that Python 1.5.2 users can use them to
install other module distributions. The Distutils' own setup script,
shown here, is used to install the package into Python 1.5.2.)
\begin{verbatim}
#!/usr/bin/env python
from distutils.core import setup
setup(name='Distutils',
version='1.0',
description='Python Distribution Utilities',
author='Greg Ward',
author_email='[email protected]',
url='http://www.python.org/sigs/distutils-sig/',
packages=['distutils', 'distutils.command'],
)
\end{verbatim}
There are only two differences between this and the trivial one-file
distribution presented in section~\ref{simple-example}: more
metadata, and the specification of pure Python modules by package,
rather than by module. This is important since the Distutils consist of
a couple of dozen modules split into (so far) two packages; an explicit
list of every module would be tedious to generate and difficult to
maintain. For more information on the additional meta-data, see
section~\ref{meta-data}.
Note that any pathnames (files or directories) supplied in the setup
script should be written using the \UNIX{} convention, i.e.
slash-separated. The Distutils will take care of converting this
platform-neutral representation into whatever is appropriate on your
current platform before actually using the pathname. This makes your
setup script portable across operating systems, which of course is one
of the major goals of the Distutils. In this spirit, all pathnames in
this document are slash-separated. (Mac OS 9 programmers should keep in
mind that the \emph{absence} of a leading slash indicates a relative
path, the opposite of the Mac OS convention with colons.)
This, of course, only applies to pathnames given to Distutils
functions. If you, for example, use standard Python functions such as
\function{glob.glob()} or \function{os.listdir()} to specify files, you
should be careful to write portable code instead of hardcoding path
separators:
\begin{verbatim}
glob.glob(os.path.join('mydir', 'subdir', '*.html'))
os.listdir(os.path.join('mydir', 'subdir'))
\end{verbatim}
\section{Listing whole packages}
\label{listing-packages}
The \option{packages} option tells the Distutils to process (build,
distribute, install, etc.) all pure Python modules found in each package
mentioned in the \option{packages} list. In order to do this, of
course, there has to be a correspondence between package names and
directories in the filesystem. The default correspondence is the most
obvious one, i.e. package \module{distutils} is found in the directory
\file{distutils} relative to the distribution root. Thus, when you say
\code{packages = ['foo']} in your setup script, you are promising that
the Distutils will find a file \file{foo/\_\_init\_\_.py} (which might
be spelled differently on your system, but you get the idea) relative to
the directory where your setup script lives. If you break this
promise, the Distutils will issue a warning but still process the broken
package anyway.
If you use a different convention to lay out your source directory,
that's no problem: you just have to supply the \option{package\_dir}
option to tell the Distutils about your convention. For example, say
you keep all Python source under \file{lib}, so that modules in the
``root package'' (i.e., not in any package at all) are in
\file{lib}, modules in the \module{foo} package are in \file{lib/foo},
and so forth. Then you would put
\begin{verbatim}
package_dir = {'': 'lib'}
\end{verbatim}
in your setup script. The keys to this dictionary are package names,
and an empty package name stands for the root package. The values are
directory names relative to your distribution root. In this case, when
you say \code{packages = ['foo']}, you are promising that the file
\file{lib/foo/\_\_init\_\_.py} exists.
Another possible convention is to put the \module{foo} package right in
\file{lib}, the \module{foo.bar} package in \file{lib/bar}, etc. This
would be written in the setup script as
\begin{verbatim}
package_dir = {'foo': 'lib'}
\end{verbatim}
A \code{\var{package}: \var{dir}} entry in the \option{package\_dir}
dictionary implicitly applies to all packages below \var{package}, so
the \module{foo.bar} case is automatically handled here. In this
example, having \code{packages = ['foo', 'foo.bar']} tells the Distutils
to look for \file{lib/\_\_init\_\_.py} and
\file{lib/bar/\_\_init\_\_.py}. (Keep in mind that although
\option{package\_dir} applies recursively, you must explicitly list all
packages in \option{packages}: the Distutils will \emph{not} recursively
scan your source tree looking for any directory with an
\file{\_\_init\_\_.py} file.)
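Putting this together, a complete (if hypothetical) setup script fragment
for the second convention might read:
\begin{verbatim}
from distutils.core import setup

setup(name='foo',
      version='1.0',
      # 'foo' lives in lib/, so the 'foo.bar' subpackage is found in lib/bar/
      package_dir={'foo': 'lib'},
      packages=['foo', 'foo.bar'],
      )
\end{verbatim}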
\section{Listing individual modules}
\label{listing-modules}
For a small module distribution, you might prefer to list all modules
rather than listing packages---especially the case of a single module
that goes in the ``root package'' (i.e., no package at all). This
simplest case was shown in section~\ref{simple-example}; here is a
slightly more involved example:
\begin{verbatim}
py_modules = ['mod1', 'pkg.mod2']
\end{verbatim}
This describes two modules, one of them in the ``root'' package, the
other in the \module{pkg} package. Again, the default package/directory
layout implies that these two modules can be found in \file{mod1.py} and
\file{pkg/mod2.py}, and that \file{pkg/\_\_init\_\_.py} exists as well.
And again, you can override the package/directory correspondence using
the \option{package\_dir} option.
\section{Describing extension modules}
\label{describing-extensions}
% XXX read over this section
Just as writing Python extension modules is a bit more complicated than
writing pure Python modules, describing them to the Distutils is a bit
more complicated. Unlike pure modules, it's not enough just to list
modules or packages and expect the Distutils to go out and find the
right files; you have to specify the extension name, source file(s), and
any compile/link requirements (include directories, libraries to link
with, etc.).
All of this is done through another keyword argument to
\function{setup()}, the \option{ext_modules} option. \option{ext_modules}
is just a list of \class{Extension} instances, each of which describes a
single extension module. Suppose your distribution includes a single
extension, called \module{foo} and implemented by \file{foo.c}. If no
additional instructions to the compiler/linker are needed, describing
this extension is quite simple:
\begin{verbatim}
Extension('foo', ['foo.c'])
\end{verbatim}
The \class{Extension} class can be imported from
\module{distutils.core} along with \function{setup()}. Thus, the setup
script for a module distribution that contains only this one extension
and nothing else might be:
\begin{verbatim}
from distutils.core import setup, Extension
setup(name='foo',
version='1.0',
ext_modules=[Extension('foo', ['foo.c'])],
)
\end{verbatim}
The \class{Extension} class (actually, the underlying extension-building
machinery implemented by the \command{build\_ext} command) supports a
great deal of flexibility in describing Python extensions, which is
explained in the following sections.
\subsection{Extension names and packages}
The first argument to the \class{Extension} constructor is always the
name of the extension, including any package names. For example,
\begin{verbatim}
Extension('foo', ['src/foo1.c', 'src/foo2.c'])
\end{verbatim}
describes an extension that lives in the root package, while
\begin{verbatim}
Extension('pkg.foo', ['src/foo1.c', 'src/foo2.c'])
\end{verbatim}
describes the same extension in the \module{pkg} package. The source
files and resulting object code are identical in both cases; the only
difference is where in the filesystem (and therefore where in Python's
namespace hierarchy) the resulting extension lives.
If you have a number of extensions all in the same package (or all under
the same base package), use the \option{ext\_package} keyword argument
to \function{setup()}. For example,
\begin{verbatim}
setup(...
ext_package='pkg',
ext_modules=[Extension('foo', ['foo.c']),
Extension('subpkg.bar', ['bar.c'])],
)
\end{verbatim}
will compile \file{foo.c} to the extension \module{pkg.foo}, and
\file{bar.c} to \module{pkg.subpkg.bar}.
\subsection{Extension source files}
The second argument to the \class{Extension} constructor is a list of
source files. Since the Distutils currently only support C, \Cpp, and
Objective-C extensions, these are normally C/\Cpp/Objective-C source
files. (Be sure to use appropriate extensions to distinguish \Cpp\
source files: \file{.cc} and \file{.cpp} seem to be recognized by both
\UNIX{} and Windows compilers.)
However, you can also include SWIG interface (\file{.i}) files in the
list; the \command{build\_ext} command knows how to deal with SWIG
extensions: it will run SWIG on the interface file and compile the
resulting C/\Cpp{} file into your extension.
\XXX{SWIG support is rough around the edges and largely untested!}
This warning notwithstanding, options to SWIG can be currently passed
like this:
\begin{verbatim}
setup(...
ext_modules=[Extension('_foo', ['foo.i'],
swig_opts=['-modern', '-I../include'])],
py_modules=['foo'],
)
\end{verbatim}
Or on the commandline like this:
\begin{verbatim}
> python setup.py build_ext --swig-opts="-modern -I../include"
\end{verbatim}
On some platforms, you can include non-source files that are processed
by the compiler and included in your extension. Currently, this just
means Windows message text (\file{.mc}) files and resource definition
(\file{.rc}) files for Visual \Cpp. These will be compiled to binary resource
(\file{.res}) files and linked into the executable.
\subsection{Preprocessor options}
Three optional arguments to \class{Extension} will help if you need to
specify include directories to search or preprocessor macros to
define/undefine: \code{include\_dirs}, \code{define\_macros}, and
\code{undef\_macros}.
For example, if your extension requires header files in the
\file{include} directory under your distribution root, use the
\code{include\_dirs} option:
\begin{verbatim}
Extension('foo', ['foo.c'], include_dirs=['include'])
\end{verbatim}
You can specify absolute directories there; if you know that your
extension will only be built on \UNIX{} systems with X11R6 installed to
\file{/usr}, you can get away with
\begin{verbatim}
Extension('foo', ['foo.c'], include_dirs=['/usr/include/X11'])
\end{verbatim}
You should avoid this sort of non-portable usage if you plan to
distribute your code: it's probably better to write C code like
\begin{verbatim}
#include <X11/Xlib.h>
\end{verbatim}
If you need to include header files from some other Python extension,
you can take advantage of the fact that header files are installed in a
consistent way by the Distutils \command{install\_header} command. For
example, the Numerical Python header files are installed (on a standard
\UNIX{} installation) to \file{/usr/local/include/python1.5/Numerical}.
(The exact location will differ according to your platform and Python
installation.) Since the Python include
directory---\file{/usr/local/include/python1.5} in this case---is always
included in the search path when building Python extensions, the best
approach is to write C code like
\begin{verbatim}
#include <Numerical/arrayobject.h>
\end{verbatim}
If you must put the \file{Numerical} include directory right into your
header search path, though, you can find that directory using the
Distutils \refmodule{distutils.sysconfig} module:
\begin{verbatim}
from distutils.sysconfig import get_python_inc
incdir = os.path.join(get_python_inc(plat_specific=1), 'Numerical')
setup(...,
Extension(..., include_dirs=[incdir]),
)
\end{verbatim}
Even though this is quite portable---it will work on any Python
installation, regardless of platform---it's probably easier to just
write your C code in the sensible way.
You can define and undefine pre-processor macros with the
\code{define\_macros} and \code{undef\_macros} options.
\code{define\_macros} takes a list of \code{(name, value)} tuples, where
\code{name} is the name of the macro to define (a string) and
\code{value} is its value: either a string or \code{None}. (Defining a
macro \code{FOO} to \code{None} is the equivalent of a bare
\code{\#define FOO} in your C source: with most compilers, this sets
\code{FOO} to the string \code{1}.) \code{undef\_macros} is just
a list of macros to undefine.
For example:
\begin{verbatim}
Extension(...,
define_macros=[('NDEBUG', '1'),
('HAVE_STRFTIME', None)],
undef_macros=['HAVE_FOO', 'HAVE_BAR'])
\end{verbatim}
is the equivalent of having this at the top of every C source file:
\begin{verbatim}
#define NDEBUG 1
#define HAVE_STRFTIME
#undef HAVE_FOO
#undef HAVE_BAR
\end{verbatim}
\subsection{Library options}
You can also specify the libraries to link against when building your
extension, and the directories to search for those libraries. The
\code{libraries} option is a list of libraries to link against,
\code{library\_dirs} is a list of directories to search for libraries at
link-time, and \code{runtime\_library\_dirs} is a list of directories to
search for shared (dynamically loaded) libraries at run-time.
For example, if you need to link against libraries known to be in the
standard library search path on target systems
\begin{verbatim}
Extension(...,
libraries=['gdbm', 'readline'])
\end{verbatim}
If you need to link with libraries in a non-standard location, you'll
have to include the location in \code{library\_dirs}:
\begin{verbatim}
Extension(...,
library_dirs=['/usr/X11R6/lib'],
libraries=['X11', 'Xt'])
\end{verbatim}
(Again, this sort of non-portable construct should be avoided if you
intend to distribute your code.)
\XXX{Should mention clib libraries here or somewhere else!}
\subsection{Other options}
There are still some other options which can be used to handle special
cases.
The \option{extra\_objects} option is a list of object files to be passed
to the linker. These files must not have extensions, as the default
extension for the compiler is used.
\option{extra\_compile\_args} and \option{extra\_link\_args} can be used
to specify additional command line options for the respective compiler and
linker command lines.
\option{export\_symbols} is only useful on Windows. It can contain a list
of symbols (functions or variables) to be exported. This option
is not needed when building compiled extensions: Distutils
will automatically add \code{initmodule}
to the list of exported symbols.
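As an illustration, an extension that combines several of these options
might be described as follows; the file names and flags here are invented
and will of course be compiler- and project-specific:
\begin{verbatim}
Extension('foo', ['foo.c'],
          # pre-built object files, named without their extension
          extra_objects=['lib/helpers'],
          # passed through verbatim to the compiler and linker command lines
          extra_compile_args=['-O3'],
          extra_link_args=['-Wl,--as-needed'],
          )
\end{verbatim}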
\section{Relationships between Distributions and Packages}
A distribution may relate to packages in three specific ways:
\begin{enumerate}
\item It can require packages or modules.
\item It can provide packages or modules.
\item It can obsolete packages or modules.
\end{enumerate}
These relationships can be specified using keyword arguments to the
\function{distutils.core.setup()} function.
Dependencies on other Python modules and packages can be specified by
supplying the \var{requires} keyword argument to \function{setup()}.
The value must be a list of strings. Each string specifies a package
that is required, and optionally what versions are sufficient.
To specify that any version of a module or package is required, the
string should consist entirely of the module or package name.
Examples include \code{'mymodule'} and \code{'xml.parsers.expat'}.
If specific versions are required, a sequence of qualifiers can be
supplied in parentheses. Each qualifier may consist of a comparison
operator and a version number. The accepted comparison operators are:
\begin{verbatim}
< > ==
<= >= !=
\end{verbatim}
These can be combined by using multiple qualifiers separated by commas
(and optional whitespace). In this case, all of the qualifiers must
be matched; a logical AND is used to combine the evaluations.
Let's look at a bunch of examples:
\begin{tableii}{l|l}{code}{Requires Expression}{Explanation}
\lineii{==1.0} {Only version \code{1.0} is compatible}
\lineii{>1.0, !=1.5.1, <2.0} {Any version after \code{1.0} and before
\code{2.0} is compatible, except
\code{1.5.1}}
\end{tableii}
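For concreteness, such requirements might be declared in a setup script as
follows; the package names and version qualifiers are purely illustrative:
\begin{verbatim}
from distutils.core import setup

setup(name='mypkg',
      version='1.0',
      packages=['mypkg'],
      # any version of the expat bindings, plus a constrained mymodule
      requires=['xml.parsers.expat', 'mymodule (>1.0, !=1.5.1, <2.0)'],
      )
\end{verbatim}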
Now that we can specify dependencies, we also need to be able to
specify what we provide that other distributions can require. This is
done using the \var{provides} keyword argument to \function{setup()}.
The value for this keyword is a list of strings, each of which names a
Python module or package, and optionally identifies the version. If
the version is not specified, it is assumed to match that of the
distribution.
Some examples:
\begin{tableii}{l|l}{code}{Provides Expression}{Explanation}
\lineii{mypkg} {Provide \code{mypkg}, using the distribution version}
\lineii{mypkg (1.1)} {Provide \code{mypkg} version 1.1, regardless of the
distribution version}
\end{tableii}
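A hypothetical distribution declaring what it provides might use:
\begin{verbatim}
from distutils.core import setup

setup(name='mypkg',
      version='1.1',
      packages=['mypkg'],
      # the bare name takes the distribution version (1.1 here), while the
      # parenthesized form pins the provided version explicitly
      provides=['mypkg', 'mypkg.compat (1.0)'],
      )
\end{verbatim}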
A package can declare that it obsoletes other packages using the
\var{obsoletes} keyword argument. The value for this is similar to
that of the \var{requires} keyword: a list of strings giving module or
package specifiers. Each specifier consists of a module or package
name optionally followed by one or more version qualifiers. Version
qualifiers are given in parentheses after the module or package name.
The versions identified by the qualifiers are those that are obsoleted
by the distribution being described. If no qualifiers are given, all
versions of the named module or package are understood to be
obsoleted.
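Taken together, a hypothetical setup script (with invented package
names and version qualifiers) might declare all three kinds of
relationship like this:
\begin{verbatim}
from distutils.core import setup

setup(name='mypkg',
      version='1.1',
      requires=['xml.parsers.expat', 'otherpkg (>1.0, !=1.5.1, <2.0)'],
      provides=['mypkg (1.1)'],
      obsoletes=['oldpkg (<=0.9)'],
      )
\end{verbatim}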
\section{Installing Scripts}
So far we have been dealing with pure and non-pure Python modules,
which are usually not run by themselves but imported by scripts.
Scripts are files containing Python source code, intended to be
started from the command line. Scripts don't require Distutils to do
anything very complicated. The only clever feature is that if the
first line of the script starts with \code{\#!} and contains the word
``python'', the Distutils will adjust the first line to refer to the
location of the current interpreter. The \longprogramopt{executable} (or
\programopt{-e}) option allows the interpreter path to be explicitly
overridden.
The \option{scripts} option is simply a list of files to be handled
in this way. From the PyXML setup script:
\begin{verbatim}
setup(...
scripts=['scripts/xmlproc_parse', 'scripts/xmlproc_val']
)
\end{verbatim}
\section{Installing Package Data}
Often, additional files need to be installed into a package. These
files are often data that's closely related to the package's
implementation, or text files containing documentation that might be
of interest to programmers using the package. These files are called
\dfn{package data}.
Package data can be added to packages using the \code{package_data}
keyword argument to the \function{setup()} function. The value must
be a mapping from package name to a list of relative path names that
should be copied into the package. The paths are interpreted as
relative to the directory containing the package (information from the
\code{package_dir} mapping is used if appropriate); that is, the files
are expected to be part of the package in the source directories.
They may contain glob patterns as well.
The path names may contain directory portions; any necessary
directories will be created in the installation.
For example, if a package should contain a subdirectory with several
data files, the files can be arranged like this in the source tree:
\begin{verbatim}
setup.py
src/
mypkg/
__init__.py
module.py
data/
tables.dat
spoons.dat
forks.dat
\end{verbatim}
The corresponding call to \function{setup()} might be:
\begin{verbatim}
setup(...,
packages=['mypkg'],
package_dir={'mypkg': 'src/mypkg'},
package_data={'mypkg': ['data/*.dat']},
)
\end{verbatim}
\versionadded{2.4}
\section{Installing Additional Files}
The \option{data\_files} option can be used to specify additional
files needed by the module distribution: configuration files, message
catalogs, data files, anything which doesn't fit in the previous
categories.
\option{data\_files} specifies a sequence of (\var{directory},
\var{files}) pairs in the following way:
\begin{verbatim}
setup(...
data_files=[('bitmaps', ['bm/b1.gif', 'bm/b2.gif']),
('config', ['cfg/data.cfg']),
('/etc/init.d', ['init-script'])]
)
\end{verbatim}
Note that you can specify the directory names where the data files
will be installed, but you cannot rename the data files themselves.
Each (\var{directory}, \var{files}) pair in the sequence specifies the
installation directory and the files to install there. If
\var{directory} is a relative path, it is interpreted relative to the
installation prefix (Python's \code{sys.prefix} for pure-Python
packages, \code{sys.exec_prefix} for packages that contain extension
modules). Each file name in \var{files} is interpreted relative to
the \file{setup.py} script at the top of the package source
distribution. No directory information from \var{files} is used to
determine the final location of the installed file; only the name of
the file is used.
You can specify the \option{data\_files} option as a simple sequence of
files without specifying a target directory, but this is not recommended,
and the \command{install} command will print a warning in this case.
To install data files directly in the target directory, an empty
string should be given as the directory.
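For example, to install a single (invented) configuration file directly
into the target directory:
\begin{verbatim}
setup(...
      data_files=[('', ['defaults.cfg'])]
      )
\end{verbatim}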
\section{Additional meta-data}
\label{meta-data}
The setup script may include additional meta-data beyond the name and
version. This information includes:
\begin{tableiv}{l|l|l|c}{code}%
{Meta-Data}{Description}{Value}{Notes}
\lineiv{name}{name of the package}
{short string}{(1)}
\lineiv{version}{version of this release}
{short string}{(1)(2)}
\lineiv{author}{package author's name}
{short string}{(3)}
\lineiv{author_email}{email address of the package author}
{email address}{(3)}
\lineiv{maintainer}{package maintainer's name}
{short string}{(3)}
\lineiv{maintainer_email}{email address of the package maintainer}
{email address}{(3)}
\lineiv{url}{home page for the package}
{URL}{(1)}
\lineiv{description}{short, summary description of the package}
{short string}{}
\lineiv{long_description}{longer description of the package}
{long string}{}
\lineiv{download_url}{location where the package may be downloaded}
{URL}{(4)}
\lineiv{classifiers}{a list of classifiers}
{list of strings}{(4)}
\lineiv{platforms}{a list of platforms}
{list of strings}{}
\end{tableiv}
\noindent Notes:
\begin{description}
\item[(1)] These fields are required.
\item[(2)] It is recommended that versions take the form
\emph{major.minor\optional{.patch\optional{.sub}}}.
\item[(3)] Either the author or the maintainer must be identified.
\item[(4)] These fields should not be used if your package is to be
compatible with Python versions prior to 2.2.3 or 2.3. The list is
available from the \ulink{PyPI website}{http://www.python.org/pypi}.
\item['short string'] A single line of text, not more than 200 characters.
\item['long string'] Multiple lines of plain text in reStructuredText
format (see \url{http://docutils.sf.net/}).
\item['list of strings'] See below.
\end{description}
None of the string values may be Unicode.
Encoding the version information is an art in itself. Python packages
generally adhere to the version format
\emph{major.minor\optional{.patch}\optional{sub}}. The major number is
0 for
initial, experimental releases of software. It is incremented for
releases that represent major milestones in a package. The minor
number is incremented when important new features are added to the
package. The patch number increments when bug-fix releases are
made. Additional trailing version information is sometimes used to
indicate sub-releases. These are "a1,a2,...,aN" (for alpha releases,
where functionality and API may change), "b1,b2,...,bN" (for beta
releases, which only fix bugs) and "pr1,pr2,...,prN" (for final
pre-release release testing). Some examples:
\begin{description}
\item[0.1.0] the first, experimental release of a package
\item[1.0.1a2] the second alpha release of the first patch version of 1.0
\end{description}
\option{classifiers} are specified in a Python list:
\begin{verbatim}
setup(...
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Python Software Foundation License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Communications :: Email',
'Topic :: Office/Business',
'Topic :: Software Development :: Bug Tracking',
],
)
\end{verbatim}
If you wish to include classifiers in your \file{setup.py} file and also
wish to remain backwards-compatible with Python releases prior to 2.2.3,
then you can include the following code fragment in your \file{setup.py}
before the \function{setup()} call.
\begin{verbatim}
# patch distutils if it can't cope with the "classifiers" or
# "download_url" keywords
from sys import version
if version < '2.2.3':
from distutils.dist import DistributionMetadata
DistributionMetadata.classifiers = None
DistributionMetadata.download_url = None
\end{verbatim}
\section{Debugging the setup script}
Sometimes things go wrong, and the setup script doesn't do what the
developer wants.
Distutils catches any exceptions when running the setup script, and
prints a simple error message before the script is terminated. The
motivation for this behaviour is to not confuse administrators who
don't know much about Python and are trying to install a package. If
they get a big long traceback from deep inside the guts of Distutils,
they may think the package or the Python installation is broken
because they don't read all the way down to the bottom and see that
it's a permission problem.
On the other hand, this doesn't help the developer find the cause of
the failure. For this purpose, the \envvar{DISTUTILS\_DEBUG} environment
variable can be set to anything except an empty string; Distutils will
then print detailed information about what it is doing, and will print
the full traceback when an exception occurs.
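For example, on a \UNIX{} system you might run any command with full
debugging output like this (\command{build} is used here only as an
example):
\begin{verbatim}
DISTUTILS_DEBUG=1 python setup.py build
\end{verbatim}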
\chapter{Writing the Setup Configuration File}
\label{setup-config}
Often, it's not possible to write down everything needed to build a
distribution \emph{a priori}: you may need to get some information from
the user, or from the user's system, in order to proceed. As long as
that information is fairly simple---a list of directories to search for
C header files or libraries, for example---then providing a
configuration file, \file{setup.cfg}, for users to edit is a cheap and
easy way to solicit it. Configuration files also let you provide
default values for any command option, which the installer can then
override either on the command-line or by editing the config file.
% (If you have more advanced needs, such as determining which extensions
% to build based on what capabilities are present on the target system,
% then you need the Distutils ``auto-configuration'' facility. This
% started to appear in Distutils 0.9 but, as of this writing, isn't mature
% or stable enough yet for real-world use.)
The setup configuration file is a useful middle-ground between the setup
script---which, ideally, would be opaque to installers\footnote{This
ideal probably won't be achieved until auto-configuration is fully
supported by the Distutils.}---and the command-line to the setup
script, which is outside of your control and entirely up to the
installer. In fact, \file{setup.cfg} (and any other Distutils
configuration files present on the target system) are processed after
the contents of the setup script, but before the command-line. This has
several useful consequences:
\begin{itemize}
\item installers can override some of what you put in \file{setup.py} by
editing \file{setup.cfg}
\item you can provide non-standard defaults for options that are not
easily set in \file{setup.py}
\item installers can override anything in \file{setup.cfg} using the
command-line options to \file{setup.py}
\end{itemize}
The basic syntax of the configuration file is simple:
\begin{verbatim}
[command]
option=value
...
\end{verbatim}
where \var{command} is one of the Distutils commands (e.g.
\command{build\_py}, \command{install}), and \var{option} is one of
the options that command supports. Any number of options can be
supplied for each command, and any number of command sections can be
included in the file. Blank lines are ignored, as are comments, which
run from a \character{\#} character until the end of the line. Long
option values can be split across multiple lines simply by indenting
the continuation lines.
You can find out the list of options supported by a particular command
with the universal \longprogramopt{help} option, e.g.
\begin{verbatim}
> python setup.py --help build_ext
[...]
Options for 'build_ext' command:
--build-lib (-b) directory for compiled extension modules
--build-temp (-t) directory for temporary files (build by-products)
--inplace (-i) ignore build-lib and put compiled extensions into the
source directory alongside your pure Python modules
--include-dirs (-I) list of directories to search for header files
--define (-D) C preprocessor macros to define
--undef (-U) C preprocessor macros to undefine
--swig-opts list of SWIG command line options
[...]
\end{verbatim}
Note that an option spelled \longprogramopt{foo-bar} on the command-line
is spelled \option{foo\_bar} in configuration files.
For example, say you want your extensions to be built
``in-place''---that is, you have an extension \module{pkg.ext}, and you
want the compiled extension file (\file{ext.so} on \UNIX, say) to be put
in the same source directory as your pure Python modules
\module{pkg.mod1} and \module{pkg.mod2}. You can always use the
\longprogramopt{inplace} option on the command-line to ensure this:
\begin{verbatim}
python setup.py build_ext --inplace
\end{verbatim}
But this requires that you always specify the \command{build\_ext}
command explicitly, and remember to provide \longprogramopt{inplace}.
An easier way is to ``set and forget'' this option, by encoding it in
\file{setup.cfg}, the configuration file for this distribution:
\begin{verbatim}
[build_ext]
inplace=1
\end{verbatim}
This will affect all builds of this module distribution, whether or not
you explicitly specify \command{build\_ext}. If you include
\file{setup.cfg} in your source distribution, it will also affect
end-user builds---which is probably a bad idea for this option, since
always building extensions in-place would break installation of the
module distribution. In certain peculiar cases, though, modules are
built right in their installation directory, so this is conceivably a
useful ability. (Distributing extensions that expect to be built in
their installation directory is almost always a bad idea, though.)
Another example: certain commands take a lot of options that don't
change from run to run; for example, \command{bdist\_rpm} needs to know
everything required to generate a ``spec'' file for creating an RPM
distribution. Some of this information comes from the setup script, and
some is automatically generated by the Distutils (such as the list of
files installed). But some of it has to be supplied as options to
\command{bdist\_rpm}, which would be very tedious to do on the
command-line for every run. Hence, here is a snippet from the
Distutils' own \file{setup.cfg}:
\begin{verbatim}
[bdist_rpm]
release = 1
packager = Greg Ward <[email protected]>
doc_files = CHANGES.txt
README.txt
USAGE.txt
doc/
examples/
\end{verbatim}
Note that the \option{doc\_files} option is simply a
whitespace-separated string split across multiple lines for readability.
\begin{seealso}
\seetitle[../inst/config-syntax.html]{Installing Python
Modules}{More information on the configuration files is
available in the manual for system administrators.}
\end{seealso}
\chapter{Creating a Source Distribution}
\label{source-dist}
As shown in section~\ref{simple-example}, you use the
\command{sdist} command to create a source distribution. In the
simplest case,
\begin{verbatim}
python setup.py sdist
\end{verbatim}
(assuming you haven't specified any \command{sdist} options in the setup
script or config file), \command{sdist} creates the archive of the
default format for the current platform. The default format is a gzip'ed
tar file (\file{.tar.gz}) on \UNIX, and ZIP file on Windows.
You can specify as many formats as you like using the
\longprogramopt{formats} option, for example:
\begin{verbatim}
python setup.py sdist --formats=gztar,zip
\end{verbatim}
to create a gzipped tarball and a zip file. The available formats are:
\begin{tableiii}{l|l|c}{code}%
{Format}{Description}{Notes}
\lineiii{zip}{zip file (\file{.zip})}{(1),(3)}
\lineiii{gztar}{gzip'ed tar file (\file{.tar.gz})}{(2),(4)}
\lineiii{bztar}{bzip2'ed tar file (\file{.tar.bz2})}{(4)}
\lineiii{ztar}{compressed tar file (\file{.tar.Z})}{(4)}
\lineiii{tar}{tar file (\file{.tar})}{(4)}
\end{tableiii}
\noindent Notes:
\begin{description}
\item[(1)] default on Windows
\item[(2)] default on \UNIX
\item[(3)] requires either external \program{zip} utility or
\module{zipfile} module (part of the standard Python library since
Python~1.6)
\item[(4)] requires external utilities: \program{tar} and possibly one
of \program{gzip}, \program{bzip2}, or \program{compress}
\end{description}
\section{Specifying the files to distribute}
\label{manifest}
If you don't supply an explicit list of files (or instructions on how to
generate one), the \command{sdist} command puts a minimal default set
into the source distribution:
\begin{itemize}
\item all Python source files implied by the \option{py\_modules} and
\option{packages} options
\item all C source files mentioned in the \option{ext\_modules} or
\option{libraries} options (\XXX{getting C library sources currently
broken---no \method{get_source_files()} method in \file{build_clib.py}!})
\item scripts identified by the \option{scripts} option
\item anything that looks like a test script: \file{test/test*.py}
(currently, the Distutils don't do anything with test scripts except
include them in source distributions, but in the future there will be
a standard for testing Python module distributions)
\item \file{README.txt} (or \file{README}), \file{setup.py} (or whatever
you called your setup script), and \file{setup.cfg}
\end{itemize}
Sometimes this is enough, but usually you will want to specify
additional files to distribute. The typical way to do this is to write
a \emph{manifest template}, called \file{MANIFEST.in} by default. The
manifest template is just a list of instructions for how to generate
your manifest file, \file{MANIFEST}, which is the exact list of files to
include in your source distribution. The \command{sdist} command
processes this template and generates a manifest based on its
instructions and what it finds in the filesystem.
If you prefer to roll your own manifest file, the format is simple: one
filename per line, regular files (or symlinks to them) only. If you do
supply your own \file{MANIFEST}, you must specify everything: the
default set of files described above does not apply in this case.
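Such a hand-written \file{MANIFEST} might look like this (the file
names are only an illustration):
\begin{verbatim}
README.txt
setup.py
mypkg/__init__.py
mypkg/module.py
mypkg/data/tables.dat
\end{verbatim}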
The manifest template has one command per line, where each command
specifies a set of files to include or exclude from the source
distribution. For an example, again we turn to the Distutils' own
manifest template:
\begin{verbatim}
include *.txt
recursive-include examples *.txt *.py
prune examples/sample?/build
\end{verbatim}
The meanings should be fairly clear: include all files in the
distribution root matching \file{*.txt}, all files anywhere under the
\file{examples} directory matching \file{*.txt} or \file{*.py}, and
exclude all directories matching \file{examples/sample?/build}. All of
this is done \emph{after} the standard include set, so you can exclude
files from the standard set with explicit instructions in the manifest
template. (Or, you can use the \longprogramopt{no-defaults} option to
disable the standard set entirely.) There are several other commands
available in the manifest template mini-language; see
section~\ref{sdist-cmd}.
The order of commands in the manifest template matters: initially, we
have the list of default files as described above, and each command in
the template adds to or removes from that list of files. Once we have
fully processed the manifest template, we remove files that should not
be included in the source distribution:
\begin{itemize}
\item all files in the Distutils ``build'' tree (default \file{build/})
\item all files in directories named \file{RCS}, \file{CVS}, \file{.svn},
\file{.hg}, \file{.git}, \file{.bzr}, or \file{\_darcs}
\end{itemize}
Now we have our complete list of files, which is written to the manifest
for future reference, and then used to build the source distribution
archive(s).
You can disable the default set of included files with the
\longprogramopt{no-defaults} option, and you can disable the standard
exclude set with \longprogramopt{no-prune}.
Following the Distutils' own manifest template, let's trace how the
\command{sdist} command builds the list of files to include in the
Distutils source distribution:
\begin{enumerate}
\item include all Python source files in the \file{distutils} and
\file{distutils/command} subdirectories (because packages
corresponding to those two directories were mentioned in the
\option{packages} option in the setup script---see
section~\ref{setup-script})
\item include \file{README.txt}, \file{setup.py}, and \file{setup.cfg}
(standard files)
\item include \file{test/test*.py} (standard files)
\item include \file{*.txt} in the distribution root (this will find
\file{README.txt} a second time, but such redundancies are weeded out
later)
\item include anything matching \file{*.txt} or \file{*.py} in the
sub-tree under \file{examples},
\item exclude all files in the sub-trees starting at directories
matching \file{examples/sample?/build}---this may exclude files
included by the previous two steps, so it's important that the
\code{prune} command in the manifest template comes after the
\code{recursive-include} command
\item exclude the entire \file{build} tree, and any \file{RCS},
\file{CVS}, \file{.svn}, \file{.hg}, \file{.git}, \file{.bzr}, or
\file{\_darcs} directories
\end{enumerate}
Just like in the setup script, file and directory names in the manifest
template should always be slash-separated; the Distutils will take care
of converting them to the standard representation on your platform.
That way, the manifest template is portable across operating systems.
\section{Manifest-related options}
\label{manifest-options}
The normal course of operations for the \command{sdist} command is as
follows:
\begin{itemize}
\item if the manifest file, \file{MANIFEST} doesn't exist, read
\file{MANIFEST.in} and create the manifest
\item if neither \file{MANIFEST} nor \file{MANIFEST.in} exist, create a
manifest with just the default file set
\item if either \file{MANIFEST.in} or the setup script (\file{setup.py})
are more recent than \file{MANIFEST}, recreate \file{MANIFEST} by
reading \file{MANIFEST.in}
\item use the list of files now in \file{MANIFEST} (either just
generated or read in) to create the source distribution archive(s)
\end{itemize}
There are a couple of options that modify this behaviour. First, use
the \longprogramopt{no-defaults} and \longprogramopt{no-prune} to
disable the standard ``include'' and ``exclude'' sets.
Second, you might want to force the manifest to be regenerated---for
example, if you have added or removed files or directories that match an
existing pattern in the manifest template, you should regenerate the
manifest:
\begin{verbatim}
python setup.py sdist --force-manifest
\end{verbatim}
Or, you might just want to (re)generate the manifest, but not create a
source distribution:
\begin{verbatim}
python setup.py sdist --manifest-only
\end{verbatim}
\longprogramopt{manifest-only} implies \longprogramopt{force-manifest}.
\programopt{-o} is a shortcut for \longprogramopt{manifest-only}, and
\programopt{-f} for \longprogramopt{force-manifest}.
\chapter{Creating Built Distributions}
\label{built-dist}
A ``built distribution'' is what you're probably used to thinking of
either as a ``binary package'' or an ``installer'' (depending on your
background). It's not necessarily binary, though, because it might
contain only Python source code and/or byte-code; and we don't call it a
package, because that word is already spoken for in Python. (And
``installer'' is a term specific to the world of mainstream desktop
systems.)
A built distribution is how you make life as easy as possible for
installers of your module distribution: for users of RPM-based Linux
systems, it's a binary RPM; for Windows users, it's an executable
installer; for Debian-based Linux users, it's a Debian package; and so
forth. Obviously, no one person will be able to create built
distributions for every platform under the sun, so the Distutils are
designed to enable module developers to concentrate on their
specialty---writing code and creating source distributions---while an
intermediary species called \emph{packagers} springs up to turn source
distributions into built distributions for as many platforms as there
are packagers.
Of course, the module developer could be his own packager; or the
packager could be a volunteer ``out there'' somewhere who has access to
a platform which the original developer does not; or it could be
software periodically grabbing new source distributions and turning them
into built distributions for as many platforms as the software has
access to. Regardless of who they are, a packager uses the
setup script and the \command{bdist} command family to generate built
distributions.
As a simple example, if I run the following command in the Distutils
source tree:
\begin{verbatim}
python setup.py bdist
\end{verbatim}
then the Distutils builds my module distribution (the Distutils itself
in this case), does a ``fake'' installation (also in the \file{build}
directory), and creates the default type of built distribution for my
platform. The default format for built distributions is a ``dumb'' tar
file on \UNIX, and a simple executable installer on Windows. (That tar
file is considered ``dumb'' because it has to be unpacked in a specific
location to work.)
Thus, the above command on a \UNIX{} system creates
\file{Distutils-1.0.\filevar{plat}.tar.gz}; unpacking this tarball
from the right place installs the Distutils just as though you had
downloaded the source distribution and run \code{python setup.py
install}. (The ``right place'' is either the root of the filesystem or
Python's \filevar{prefix} directory, depending on the options given to
the \command{bdist\_dumb} command; the default is to make dumb
distributions relative to \filevar{prefix}.)
Obviously, for pure Python distributions, this isn't any simpler than
just running \code{python setup.py install}---but for non-pure
distributions, which include extensions that would need to be
compiled, it can mean the difference between someone being able to use
your extensions or not. And creating ``smart'' built distributions,
such as an RPM package or an executable installer for Windows, is far
more convenient for users even if your distribution doesn't include
any extensions.
The \command{bdist} command has a \longprogramopt{formats} option,
similar to the \command{sdist} command, which you can use to select the
types of built distribution to generate: for example,
\begin{verbatim}
python setup.py bdist --format=zip
\end{verbatim}
would, when run on a \UNIX{} system, create
\file{Distutils-1.0.\filevar{plat}.zip}---again, this archive would be
unpacked from the root directory to install the Distutils.
The available formats for built distributions are:
\begin{tableiii}{l|l|c}{code}%
{Format}{Description}{Notes}
\lineiii{gztar}{gzipped tar file (\file{.tar.gz})}{(1),(3)}
\lineiii{ztar}{compressed tar file (\file{.tar.Z})}{(3)}
\lineiii{tar}{tar file (\file{.tar})}{(3)}
\lineiii{zip}{zip file (\file{.zip})}{(4)}
\lineiii{rpm}{RPM}{(5)}
\lineiii{pkgtool}{Solaris \program{pkgtool}}{}
\lineiii{sdux}{HP-UX \program{swinstall}}{}
% \lineiii{srpm}{source RPM}{(5) \XXX{to do!}}
\lineiii{wininst}{self-extracting ZIP file for Windows}{(2),(4)}
\end{tableiii}
\noindent Notes:
\begin{description}
\item[(1)] default on \UNIX
\item[(2)] default on Windows \XXX{to-do!}
\item[(3)] requires external utilities: \program{tar} and possibly one
of \program{gzip}, \program{bzip2}, or \program{compress}
\item[(4)] requires either external \program{zip} utility or
\module{zipfile} module (part of the standard Python library since
Python~1.6)
\item[(5)] requires external \program{rpm} utility, version 3.0.4 or
better (use \code{rpm --version} to find out which version you have)
\end{description}
You don't have to use the \command{bdist} command with the
\longprogramopt{formats} option; you can also use the command that
directly implements the format you're interested in. Some of these
\command{bdist} ``sub-commands'' actually generate several similar
formats; for instance, the \command{bdist\_dumb} command generates all
the ``dumb'' archive formats (\code{tar}, \code{ztar}, \code{gztar}, and
\code{zip}), and \command{bdist\_rpm} generates both binary and source
RPMs. The \command{bdist} sub-commands, and the formats generated by
each, are:
\begin{tableii}{l|l}{command}%
{Command}{Formats}
\lineii{bdist\_dumb}{tar, ztar, gztar, zip}
\lineii{bdist\_rpm}{rpm, srpm}
\lineii{bdist\_wininst}{wininst}
\end{tableii}
The following sections give details on the individual \command{bdist\_*}
commands.
\section{Creating dumb built distributions}
\label{creating-dumb}
\XXX{Need to document absolute vs. prefix-relative packages here, but
first I have to implement it!}
\section{Creating RPM packages}
\label{creating-rpms}
The RPM format is used by many popular Linux distributions, including
Red Hat, SuSE, and Mandrake. If one of these (or any of the other
RPM-based Linux distributions) is your usual environment, creating RPM
packages for other users of that same distribution is trivial.
Depending on the complexity of your module distribution and differences
between Linux distributions, you may also be able to create RPMs that
work on different RPM-based distributions.
The usual way to create an RPM of your module distribution is to run the
\command{bdist\_rpm} command:
\begin{verbatim}
python setup.py bdist_rpm
\end{verbatim}
or the \command{bdist} command with the \longprogramopt{format} option:
\begin{verbatim}
python setup.py bdist --formats=rpm
\end{verbatim}
The former allows you to specify RPM-specific options; the latter allows
you to easily specify multiple formats in one run. If you need to do
both, you can explicitly specify multiple \command{bdist\_*} commands
and their options:
\begin{verbatim}
python setup.py bdist_rpm --packager="John Doe <[email protected]>" \
                bdist_wininst --target-version="2.0"
\end{verbatim}
Creating RPM packages is driven by a \file{.spec} file, much as using
the Distutils is driven by the setup script. To make your life easier,
the \command{bdist\_rpm} command normally creates a \file{.spec} file
based on the information you supply in the setup script, on the command
line, and in any Distutils configuration files. Various options and
sections in the \file{.spec} file are derived from options in the setup
script as follows:
\begin{tableii}{l|l}{textrm}%
{RPM \file{.spec} file option or section}{Distutils setup script option}
\lineii{Name}{\option{name}}
\lineii{Summary (in preamble)}{\option{description}}
\lineii{Version}{\option{version}}
\lineii{Vendor}{\option{author} and \option{author\_email}, or \\&
\option{maintainer} and \option{maintainer\_email}}
\lineii{Copyright}{\option{license}}
\lineii{Url}{\option{url}}
\lineii{\%description (section)}{\option{long\_description}}
\end{tableii}
Additionally, there are many options in \file{.spec} files that don't have
corresponding options in the setup script. Most of these are handled
through options to the \command{bdist\_rpm} command as follows:
\begin{tableiii}{l|l|l}{textrm}%
{RPM \file{.spec} file option or section}%
{\command{bdist\_rpm} option}%
{default value}
\lineiii{Release}{\option{release}}{``1''}
\lineiii{Group}{\option{group}}{``Development/Libraries''}
\lineiii{Vendor}{\option{vendor}}{(see above)}
\lineiii{Packager}{\option{packager}}{(none)}
\lineiii{Provides}{\option{provides}}{(none)}
\lineiii{Requires}{\option{requires}}{(none)}
\lineiii{Conflicts}{\option{conflicts}}{(none)}
\lineiii{Obsoletes}{\option{obsoletes}}{(none)}
\lineiii{Distribution}{\option{distribution\_name}}{(none)}
\lineiii{BuildRequires}{\option{build\_requires}}{(none)}
\lineiii{Icon}{\option{icon}}{(none)}
\end{tableiii}
Obviously, supplying even a few of these options on the command-line
would be tedious and error-prone, so it's usually best to put them in
the setup configuration file, \file{setup.cfg}---see
section~\ref{setup-config}. If you distribute or package many Python
module distributions, you might want to put options that apply to all of
them in your personal Distutils configuration file
(\file{\textasciitilde/.pydistutils.cfg}).
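For example, a personal \file{\textasciitilde/.pydistutils.cfg} might
hold the packager and vendor values (invented here) that apply to every
distribution you package:
\begin{verbatim}
[bdist_rpm]
packager = Jane Doe <[email protected]>
vendor = Example Project
\end{verbatim}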
There are three steps to building a binary RPM package, all of which are
handled automatically by the Distutils:
\begin{enumerate}
\item create a \file{.spec} file, which describes the package (analogous
to the Distutils setup script; in fact, much of the information in the
setup script winds up in the \file{.spec} file)
\item create the source RPM
\item create the ``binary'' RPM (which may or may not contain binary
code, depending on whether your module distribution contains Python
extensions)
\end{enumerate}
Normally, RPM bundles the last two steps together; when you use the
Distutils, all three steps are typically bundled together.
If you wish, you can separate these three steps. You can use the
\longprogramopt{spec-only} option to make \command{bdist_rpm} just
create the \file{.spec} file and exit; in this case, the \file{.spec}
file will be written to the ``distribution directory''---normally
\file{dist/}, but customizable with the \longprogramopt{dist-dir}
option. (Normally, the \file{.spec} file winds up deep in the ``build
tree,'' in a temporary directory created by \command{bdist_rpm}.)
% \XXX{this isn't implemented yet---is it needed?!}
% You can also specify a custom \file{.spec} file with the
% \longprogramopt{spec-file} option; used in conjunction with
% \longprogramopt{spec-only}, this gives you an opportunity to customize
% the \file{.spec} file manually:
%
% \ begin{verbatim}
% > python setup.py bdist_rpm --spec-only
% # ...edit dist/FooBar-1.0.spec
% > python setup.py bdist_rpm --spec-file=dist/FooBar-1.0.spec
% \ end{verbatim}
%
% (Although a better way to do this is probably to override the standard
% \command{bdist\_rpm} command with one that writes whatever else you want
% to the \file{.spec} file.)
\section{Creating Windows Installers}
\label{creating-wininst}
Executable installers are the natural format for binary distributions
on Windows. They display a nice graphical user interface, display
some information about the module distribution to be installed taken
from the metadata in the setup script, let the user select a few
options, and start or cancel the installation.
Since the metadata is taken from the setup script, creating Windows
installers is usually as easy as running:
\begin{verbatim}
python setup.py bdist_wininst
\end{verbatim}
or the \command{bdist} command with the \longprogramopt{formats} option:
\begin{verbatim}
python setup.py bdist --formats=wininst
\end{verbatim}
If you have a pure module distribution (only containing pure Python
modules and packages), the resulting installer will be version
independent and have a name like \file{foo-1.0.win32.exe}. These
installers can even be created on \UNIX{} or Mac OS platforms.
If you have a non-pure distribution, the extensions can only be
created on a Windows platform, and will be Python version dependent.
The installer filename will reflect this and now has the form
\file{foo-1.0.win32-py2.0.exe}. You have to create a separate installer
for every Python version you want to support.
The installer will try to compile pure modules into bytecode after
installation on the target system in normal and optimizing mode. If
you don't want this to happen for some reason, you can run the
\command{bdist_wininst} command with the
\longprogramopt{no-target-compile} and/or the
\longprogramopt{no-target-optimize} option.
By default the installer will display the cool ``Python Powered'' logo
when it is run, but you can also supply your own bitmap, which must be
a Windows \file{.bmp} file, via the \longprogramopt{bitmap} option.
The installer will also display a large title on the desktop
background window when it is run, which is constructed from the name
of your distribution and the version number. This can be changed to
another text by using the \longprogramopt{title} option.
The installer file will be written to the ``distribution directory''
--- normally \file{dist/}, but customizable with the
\longprogramopt{dist-dir} option.
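For example, combining a few of these options (with an invented bitmap
file and title):
\begin{verbatim}
python setup.py bdist_wininst --bitmap=images/logo.bmp --title="Foo 1.0"
\end{verbatim}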
\subsection{The Postinstallation script}
\label{postinstallation-script}
Starting with Python 2.3, a postinstallation script can be specified
with the \longprogramopt{install-script} option. The basename of the
script must be specified, and the script filename must also be listed
in the \option{scripts} argument to the \function{setup()} function.
This script will be run at installation time on the target system
after all the files have been copied, with \code{argv[1]} set to
\programopt{-install}, and again at uninstallation time before the
files are removed with \code{argv[1]} set to \programopt{-remove}.
The installation script runs embedded in the Windows installer; all
output (\code{sys.stdout}, \code{sys.stderr}) is redirected into a
buffer and will be displayed in the GUI after the script has finished.
Some functions especially useful in this context are available as
additional built-in functions in the installation script.
\begin{funcdesc}{directory_created}{path}
\funcline{file_created}{path}
These functions should be called when a directory or file is created
by the postinstall script at installation time. They register
\var{path} with the uninstaller, so that it will be removed when the
distribution is uninstalled. To be safe, directories are only removed
if they are empty.
\end{funcdesc}
\begin{funcdesc}{get_special_folder_path}{csidl_string}
This function can be used to retrieve special folder locations on
Windows like the Start Menu or the Desktop. It returns the full
path to the folder. \var{csidl_string} must be one of the following
strings:
\begin{verbatim}
"CSIDL_APPDATA"
"CSIDL_COMMON_STARTMENU"
"CSIDL_STARTMENU"
"CSIDL_COMMON_DESKTOPDIRECTORY"
"CSIDL_DESKTOPDIRECTORY"
"CSIDL_COMMON_STARTUP"
"CSIDL_STARTUP"
"CSIDL_COMMON_PROGRAMS"
"CSIDL_PROGRAMS"
"CSIDL_FONTS"
\end{verbatim}
If the folder cannot be retrieved, \exception{OSError} is raised.
Which folders are available depends on the exact Windows version,
and probably also the configuration. For details refer to
Microsoft's documentation of the
\cfunction{SHGetSpecialFolderPath()} function.
\end{funcdesc}
\begin{funcdesc}{create_shortcut}{target, description,
filename\optional{,
arguments\optional{,
workdir\optional{,
iconpath\optional{, iconindex}}}}}
This function creates a shortcut.
\var{target} is the path to the program to be started by the shortcut.
\var{description} is the description of the shortcut.
\var{filename} is the title of the shortcut that the user will see.
\var{arguments} specifies the command line arguments, if any.
\var{workdir} is the working directory for the program.
\var{iconpath} is the file containing the icon for the shortcut,
and \var{iconindex} is the index of the icon in the file
\var{iconpath}. Again, for details consult the Microsoft
documentation for the \class{IShellLink} interface.
\end{funcdesc}
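As a sketch of how these pieces fit together, a hypothetical
postinstallation script might create a Start Menu shortcut at install
time. Remember that \function{get_special_folder_path()},
\function{create_shortcut()} and \function{file_created()} exist only
as built-ins inside the installer, so a script like this cannot be run
standalone; the file and shortcut names below are invented:
\begin{verbatim}
import os
import sys

if len(sys.argv) > 1 and sys.argv[1] == '-install':
    # look up the user's Start Menu "Programs" folder
    programs = get_special_folder_path("CSIDL_PROGRAMS")
    shortcut = os.path.join(programs, "Run Foo.lnk")
    target = os.path.join(sys.prefix, "Scripts", "foo.py")
    # create the shortcut and register it with the uninstaller,
    # so it is removed again when the distribution is uninstalled
    create_shortcut(target, "Runs the foo utility", shortcut)
    file_created(shortcut)
\end{verbatim}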
\chapter{Registering with the Package Index}
\label{package-index}
The Python Package Index (PyPI) holds meta-data describing distributions
packaged with distutils. The distutils command \command{register} is
used to submit your distribution's meta-data to the index. It is invoked
as follows:
\begin{verbatim}
python setup.py register
\end{verbatim}
Distutils will respond with the following prompt:
\begin{verbatim}
running register
We need to know who you are, so please choose either:
1. use your existing login,
2. register as a new user,
3. have the server generate a new password for you (and email it to you), or
4. quit
Your selection [default 1]:
\end{verbatim}
\noindent Note: if your username and password are saved locally, you will
not see this menu.
If you have not registered with PyPI, then you will need to do so now. You
should choose option 2, and enter your details as required. Soon after
submitting your details, you will receive an email which will be used to
confirm your registration.
Once you are registered, you may choose option 1 from the menu. You will
be prompted for your PyPI username and password, and \command{register}
will then submit your meta-data to the index.
You may submit any number of versions of your distribution to the index. If
you alter the meta-data for a particular version, you may submit it again
and the index will be updated.
PyPI holds a record for each (name, version) combination submitted. The
first user to submit information for a given name is designated the Owner
of that name. They may submit changes through the \command{register}
command or through the web interface. They may also designate other users
as Owners or Maintainers. Maintainers may edit the package information, but
not designate other Owners or Maintainers.
By default PyPI will list all versions of a given package. To hide certain
versions, the Hidden property should be set to yes. This must be edited
through the web interface.
\section{The .pypirc file}
\label{pypirc}
The \file{.pypirc} file is formatted as follows:
\begin{verbatim}
[server-login]
repository: <repository-url>
username: <username>
password: <password>
\end{verbatim}
\var{repository} can be omitted and defaults to
\code{http://www.python.org/pypi}.
\chapter{Uploading Packages to the Package Index}
\label{package-upload}
\versionadded{2.5}
The Python Package Index (PyPI) not only stores the package info, but also
the package data if the author of the package wishes to. The distutils
command \command{upload} pushes the distribution files to PyPI.
The command is invoked immediately after building one or more distribution
files. For example, the command
\begin{verbatim}
python setup.py sdist bdist_wininst upload
\end{verbatim}
will cause the source distribution and the Windows installer to be
uploaded to PyPI. Note that these will be uploaded even if they are
built using an earlier invocation of \file{setup.py}, but that only
distributions named on the command line for the invocation including
the \command{upload} command are uploaded.
The \command{upload} command uses the username, password, and repository
URL from the \file{\$HOME/.pypirc} file (see section~\ref{pypirc} for
more on this file).
You can use the \longprogramopt{sign} option to tell \command{upload} to
sign each uploaded file using GPG (GNU Privacy Guard). The
\program{gpg} program must be available for execution on the system
\envvar{PATH}. You can also specify which key to use for signing
using the \longprogramopt{identity=\var{name}} option.
Other \command{upload} options include
\longprogramopt{repository=\var{url}} (which lets you override the
repository setting from \file{\$HOME/.pypirc}), and
\longprogramopt{show-response} (which displays the full response text
from the PyPI server for help in debugging upload problems).
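For example, to build a source distribution and upload it signed, with
a particular (hypothetical) key name and the server response displayed:
\begin{verbatim}
python setup.py sdist upload --sign --identity="Jane Doe" --show-response
\end{verbatim}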
\chapter{Examples}
\label{examples}
This chapter provides a number of basic examples to help get started
with distutils. Additional information about using distutils can be
found in the Distutils Cookbook.
\begin{seealso}
\seelink{http://www.python.org/cgi-bin/moinmoin/DistutilsCookbook}
{Distutils Cookbook}
{Collection of recipes showing how to achieve more control
over distutils.}
\end{seealso}
\section{Pure Python distribution (by module)}
\label{pure-mod}
If you're just distributing a couple of modules, especially if they
don't live in a particular package, you can specify them individually
using the \option{py\_modules} option in the setup script.
In the simplest case, you'll have two files to worry about: a setup
script and the single module you're distributing, \file{foo.py} in this
example:
\begin{verbatim}
<root>/
setup.py
foo.py
\end{verbatim}
(In all diagrams in this section, \verb|<root>| will refer to the
distribution root directory.) A minimal setup script to describe this
situation would be:
\begin{verbatim}
from distutils.core import setup
setup(name='foo',
version='1.0',
py_modules=['foo'],
)
\end{verbatim}
Note that the name of the distribution is specified independently with
the \option{name} option, and there's no rule that says it has to be the
same as the name of the sole module in the distribution (although that's
probably a good convention to follow). However, the distribution name
is used to generate filenames, so you should stick to letters, digits,
underscores, and hyphens.
Since \option{py\_modules} is a list, you can of course specify multiple
modules, e.g.\ if you're distributing modules \module{foo} and
\module{bar}, your setup might look like this:
\begin{verbatim}
<root>/
setup.py
foo.py
bar.py
\end{verbatim}
and the setup script might be
\begin{verbatim}
from distutils.core import setup
setup(name='foobar',
version='1.0',
py_modules=['foo', 'bar'],
)
\end{verbatim}
You can put module source files into another directory, but if you have
enough modules to do that, it's probably easier to specify modules by
package rather than listing them individually.
\section{Pure Python distribution (by package)}
\label{pure-pkg}
If you have more than a couple of modules to distribute, especially if
they are in multiple packages, it's probably easier to specify whole
packages rather than individual modules. This works even if your
modules are not in a package; you can just tell the Distutils to process
modules from the root package, and that works the same as any other
package (except that you don't have to have an \file{\_\_init\_\_.py}
file).
The setup script from the last example could also be written as
\begin{verbatim}
from distutils.core import setup
setup(name='foobar',
version='1.0',
packages=[''],
)
\end{verbatim}
(The empty string stands for the root package.)
If those two files are moved into a subdirectory, but remain in the root
package, e.g.:
\begin{verbatim}
<root>/
setup.py
        src/
                foo.py
                bar.py
\end{verbatim}
then you would still specify the root package, but you have to tell the
Distutils where source files in the root package live:
\begin{verbatim}
from distutils.core import setup
setup(name='foobar',
version='1.0',
package_dir={'': 'src'},
packages=[''],
)
\end{verbatim}
More typically, though, you will want to distribute multiple modules in
the same package (or in sub-packages). For example, if the \module{foo}
and \module{bar} modules belong in package \module{foobar}, one way to
layout your source tree is
\begin{verbatim}
<root>/
setup.py
foobar/
__init__.py
foo.py
bar.py
\end{verbatim}
This is in fact the default layout expected by the Distutils, and the
one that requires the least work to describe in your setup script:
\begin{verbatim}
from distutils.core import setup
setup(name='foobar',
version='1.0',
packages=['foobar'],
)
\end{verbatim}
If you want to put modules in directories not named for their package,
then you need to use the \option{package\_dir} option again. For
example, if the \file{src} directory holds modules in the
\module{foobar} package:
\begin{verbatim}
<root>/
setup.py
src/
__init__.py
foo.py
bar.py
\end{verbatim}
an appropriate setup script would be
\begin{verbatim}
from distutils.core import setup
setup(name='foobar',
version='1.0',
package_dir={'foobar': 'src'},
packages=['foobar'],
)
\end{verbatim}
Or, you might put modules from your main package right in the
distribution root:
\begin{verbatim}
<root>/
setup.py
__init__.py
foo.py
bar.py
\end{verbatim}
in which case your setup script would be
\begin{verbatim}
from distutils.core import setup
setup(name='foobar',
version='1.0',
package_dir={'foobar': ''},
packages=['foobar'],
)
\end{verbatim}
(The empty string also stands for the current directory.)
If you have sub-packages, they must be explicitly listed in
\option{packages}, but any entries in \option{package\_dir}
automatically extend to sub-packages. (In other words, the Distutils
does \emph{not} scan your source tree, trying to figure out which
directories correspond to Python packages by looking for
\file{\_\_init\_\_.py} files.) Thus, if the default layout grows a
sub-package:
\begin{verbatim}
<root>/
setup.py
foobar/
__init__.py
foo.py
bar.py
subfoo/
__init__.py
blah.py
\end{verbatim}
then the corresponding setup script would be
\begin{verbatim}
from distutils.core import setup
setup(name='foobar',
version='1.0',
packages=['foobar', 'foobar.subfoo'],
)
\end{verbatim}
(Again, the empty string in \option{package\_dir} stands for the current
directory.)
\section{Single extension module}
\label{single-ext}
Extension modules are specified using the \option{ext\_modules} option.
\option{package\_dir} has no effect on where extension source files are
found; it only affects the source for pure Python modules. The simplest
case, a single extension module in a single C source file, is:
\begin{verbatim}
<root>/
setup.py
foo.c
\end{verbatim}
If the \module{foo} extension belongs in the root package, the setup
script for this could be
\begin{verbatim}
from distutils.core import setup
from distutils.extension import Extension
setup(name='foobar',
version='1.0',
ext_modules=[Extension('foo', ['foo.c'])],
)
\end{verbatim}
If the extension actually belongs in a package, say \module{foopkg},
then with exactly the same source tree layout, this extension can be put
in the \module{foopkg} package simply by changing the name of the
extension:
\begin{verbatim}
from distutils.core import setup
from distutils.extension import Extension
setup(name='foobar',
version='1.0',
ext_modules=[Extension('foopkg.foo', ['foo.c'])],
)
\end{verbatim}
%\section{Multiple extension modules}
%\label{multiple-ext}
%\section{Putting it all together}
\chapter{Extending Distutils \label{extending}}
Distutils can be extended in various ways. Most extensions take the
form of new commands or replacements for existing commands. New
commands may be written to support new types of platform-specific
packaging, for example, while replacements for existing commands may
be made to modify details of how the command operates on a package.
Most extensions of the distutils are made within \file{setup.py}
scripts that want to modify existing commands; many simply add a few
file extensions that should be copied into packages in addition to
\file{.py} files as a convenience.
Most distutils command implementations are subclasses of the
\class{Command} class from \refmodule{distutils.cmd}. New commands
may directly inherit from \class{Command}, while replacements often
derive from \class{Command} indirectly, directly subclassing the
command they are replacing. Commands are required to derive from
\class{Command}.
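As a rough sketch (the command name, option, and message are invented),
a minimal new command derived directly from \class{Command} has to
provide \method{initialize_options()}, \method{finalize_options()} and
\method{run()}:
\begin{verbatim}
from distutils.cmd import Command

class hello(Command):
    """Hypothetical command that just prints a configurable message."""

    description = "print a friendly greeting"
    user_options = [('message=', 'm',
                     "text to print when the command runs")]

    def initialize_options(self):
        # set every option to a default before the config files
        # and command line are parsed
        self.message = None

    def finalize_options(self):
        if self.message is None:
            self.message = "hello from the hello command"

    def run(self):
        self.announce(self.message)
\end{verbatim}
The following section describes how a command like this can be made
available to a setup script.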
%\section{Extending existing commands}
%\label{extend-existing}
%\section{Writing new commands}
%\label{new-commands}
%\XXX{Would an uninstall command be a good example here?}
\section{Integrating new commands}
There are different ways to integrate new command implementations into
distutils. The most difficult is to lobby for the inclusion of the
new features in distutils itself, and wait for (and require) a version
of Python that provides that support. This is really hard for many
reasons.
The most common, and possibly the most reasonable for most needs, is
to include the new implementations with your \file{setup.py} script,
and have the \function{distutils.core.setup()} function use them:
\begin{verbatim}
from distutils.command.build_py import build_py as _build_py
from distutils.core import setup
class build_py(_build_py):
"""Specialized Python source builder."""
# implement whatever needs to be different...
setup(cmdclass={'build_py': build_py},
...)
\end{verbatim}
This approach is most valuable if the new implementations must be used
in order to use a particular package, since everyone interested in the
package will then need to have the new command implementation.
Beginning with Python 2.4, a third option is available, intended to
allow new commands to be added which can support existing
\file{setup.py} scripts without requiring modifications to the Python
installation. This is expected to allow third-party extensions to
provide support for additional packaging systems, but the commands can
be used for anything distutils commands can be used for. A new
configuration option, \option{command\_packages} (command-line option
\longprogramopt{command-packages}), can be used to specify additional
packages to be searched for modules implementing commands. Like all
distutils options, this can be specified on the command line or in a
configuration file. This option can only be set in the
\code{[global]} section of a configuration file, or before any
commands on the command line. If set in a configuration file, it can
be overridden from the command line; setting it to an empty string on
the command line causes the default to be used. This should never be
set in a configuration file provided with a package.
This new option can be used to add any number of packages to the list
of packages searched for command implementations; multiple package
names should be separated by commas. When not specified, the search
is only performed in the \module{distutils.command} package. When
\file{setup.py} is run with the option
\longprogramopt{command-packages} \programopt{distcmds,buildcmds},
however, the packages \module{distutils.command}, \module{distcmds},
and \module{buildcmds} will be searched in that order. New commands
are expected to be implemented in modules of the same name as the
command by classes sharing the same name. Given the example command
line option above, the command \command{bdist\_openpkg} could be
implemented by the class \class{distcmds.bdist_openpkg.bdist_openpkg}
or \class{buildcmds.bdist_openpkg.bdist_openpkg}.
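In a configuration file, the equivalent of the command line above
(using the same hypothetical package names) is:
\begin{verbatim}
[global]
command_packages = distcmds,buildcmds
\end{verbatim}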
\section{Adding new distribution types}
Commands that create distributions (files in the \file{dist/}
directory) need to add \code{(\var{command}, \var{filename})} pairs to
\code{self.distribution.dist_files} so that \command{upload} can
upload it to PyPI. The \var{filename} in the pair contains no path
information, only the name of the file itself. In dry-run mode, pairs
should still be added to represent what would have been created.
\chapter{Command Reference}
\label{reference}
%\section{Building modules: the \protect\command{build} command family}
%\label{build-cmds}
%\subsubsection{\protect\command{build}}
%\label{build-cmd}
%\subsubsection{\protect\command{build\_py}}
%\label{build-py-cmd}
%\subsubsection{\protect\command{build\_ext}}
%\label{build-ext-cmd}
%\subsubsection{\protect\command{build\_clib}}
%\label{build-clib-cmd}
\section{Installing modules: the \protect\command{install} command family}
\label{install-cmd}
The \command{install} command ensures that the build commands have been run and then
runs the subcommands \command{install\_lib},
\command{install\_data} and
\command{install\_scripts}.
%\subsubsection{\protect\command{install\_lib}}
%\label{install-lib-cmd}
\subsection{\protect\command{install\_data}}
\label{install-data-cmd}
This command installs all data files provided with the distribution.
\subsection{\protect\command{install\_scripts}}
\label{install-scripts-cmd}
This command installs all (Python) scripts in the distribution.
%\subsection{Cleaning up: the \protect\command{clean} command}
%\label{clean-cmd}
\section{Creating a source distribution: the
\protect\command{sdist} command}
\label{sdist-cmd}
\XXX{fragment moved down from above: needs context!}
The manifest template commands are:
\begin{tableii}{ll}{command}{Command}{Description}
\lineii{include \var{pat1} \var{pat2} ... }
{include all files matching any of the listed patterns}
\lineii{exclude \var{pat1} \var{pat2} ... }
{exclude all files matching any of the listed patterns}
\lineii{recursive-include \var{dir} \var{pat1} \var{pat2} ... }
{include all files under \var{dir} matching any of the listed patterns}
\lineii{recursive-exclude \var{dir} \var{pat1} \var{pat2} ...}
{exclude all files under \var{dir} matching any of the listed patterns}
\lineii{global-include \var{pat1} \var{pat2} ...}
{include all files anywhere in the source tree matching\\&
any of the listed patterns}
\lineii{global-exclude \var{pat1} \var{pat2} ...}
{exclude all files anywhere in the source tree matching\\&
any of the listed patterns}
\lineii{prune \var{dir}}{exclude all files under \var{dir}}
\lineii{graft \var{dir}}{include all files under \var{dir}}
\end{tableii}
The patterns here are \UNIX-style ``glob'' patterns: \code{*} matches any
sequence of regular filename characters, \code{?} matches any single
regular filename character, and \code{[\var{range}]} matches any of the
characters in \var{range} (e.g., \code{a-z}, \code{a-zA-Z},
\code{a-f0-9\_.}). The definition of ``regular filename character'' is
platform-specific: on \UNIX{} it is anything except slash; on Windows
anything except backslash or colon; on Mac OS 9 anything except colon.
\XXX{Windows support not there yet}
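For example, a manifest template exercising some of the commands not
shown earlier might read (the directory and pattern names are
invented):
\begin{verbatim}
include README.txt
graft docs
recursive-exclude docs *.bak
global-exclude *.pyc *~
\end{verbatim}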
%\section{Creating a built distribution: the
% \protect\command{bdist} command family}
%\label{bdist-cmds}
%\subsection{\protect\command{bdist}}
%\subsection{\protect\command{bdist\_dumb}}
%\subsection{\protect\command{bdist\_rpm}}
%\subsection{\protect\command{bdist\_wininst}}
\chapter{API Reference \label{api-reference}}
\section{\module{distutils.core} --- Core Distutils functionality}
\declaremodule{standard}{distutils.core}
\modulesynopsis{The core Distutils functionality}
The \module{distutils.core} module is the only module that needs to be
installed to use the Distutils. It provides the \function{setup()} function,
which is called from the setup script. Indirectly, it also provides the
\class{distutils.dist.Distribution} and \class{distutils.cmd.Command} classes.
\begin{funcdesc}{setup}{arguments}
The basic do-everything function that does almost everything you could ever
ask for from a Distutils method. See XXXXX
The \function{setup()} function takes a large number of arguments, which
are laid out in the following table.
\begin{tableiii}{c|l|l}{argument name}{argument name}{value}{type}
\lineiii{name}{The name of the package}{a string}
\lineiii{version}{The version number of the package}{See \refmodule{distutils.version}}
\lineiii{description}{A single line describing the package}{a string}
\lineiii{long_description}{Longer description of the package}{a string}
\lineiii{author}{The name of the package author}{a string}
\lineiii{author_email}{The email address of the package author}{a string}
\lineiii{maintainer}{The name of the current maintainer, if different from the author}{a string}
\lineiii{maintainer_email}{The email address of the current maintainer, if different from the author}{a string}
\lineiii{url}{A URL for the package (homepage)}{a URL}
\lineiii{download_url}{A URL to download the package}{a URL}
\lineiii{packages}{A list of Python packages that distutils will manipulate}{a list of strings}
\lineiii{py_modules}{A list of Python modules that distutils will manipulate}{a list of strings}
\lineiii{scripts}{A list of standalone script files to be built and installed}{a list of strings}
\lineiii{ext_modules}{A list of Python extensions to be built}{A list of
instances of \class{distutils.core.Extension}}
\lineiii{classifiers}{A list of categories for the package}{The list of available categorizations is at \url{http://cheeseshop.python.org/pypi?:action=list_classifiers}.}
\lineiii{distclass}{the \class{Distribution} class to use}{A subclass of \class{distutils.core.Distribution}}
% What on earth is the use case for script_name?
\lineiii{script_name}{The name of the setup.py script - defaults to \code{sys.argv[0]}}{a string}
\lineiii{script_args}{Arguments to supply to the setup script}{a list of strings}
\lineiii{options}{default options for the setup script}{a dictionary}
\lineiii{license}{The license for the package}{}
\lineiii{keywords}{Descriptive meta-data. See \pep{314}}{}
\lineiii{platforms}{}{}
\lineiii{cmdclass}{A mapping of command names to \class{Command} subclasses}{a dictionary}
\end{tableiii}
\end{funcdesc}
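By way of illustration, a minimal, hypothetical setup script using a
handful of these arguments might look like this (the package name and
meta-data are made up):
\begin{verbatim}
from distutils.core import setup

setup(name='foo',
      version='1.0',
      description='A hypothetical package',
      author='A. N. Other',
      author_email='author@example.org',
      url='http://www.example.org/foo/',
      packages=['foo'],
     )
\end{verbatim}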
\begin{funcdesc}{run_setup}{script_name\optional{, script_args=\code{None}, stop_after=\code{'run'}}}
Run a setup script in a somewhat controlled environment, and return
the \class{distutils.dist.Distribution} instance that drives things.
This is useful if you need to find out the distribution meta-data
(passed as keyword args from \var{script_name} to \function{setup()}), or
the contents of the config files or command-line.
\var{script_name} is a file that will be run with \function{execfile()};
\code{sys.argv[0]} will be replaced with \var{script_name} for the duration of the
call. \var{script_args} is a list of strings; if supplied,
\code{sys.argv[1:]} will be replaced by \var{script_args} for the duration
of the call.
\var{stop_after} tells \function{setup()} when to stop processing; possible
values:
\begin{tableii}{c|l}{value}{value}{description}
\lineii{init}{Stop after the \class{Distribution} instance has been created
and populated with the keyword arguments to \function{setup()}}
\lineii{config}{Stop after config files have been parsed (and their data
stored in the \class{Distribution} instance)}
\lineii{commandline}{Stop after the command-line (\code{sys.argv[1:]} or
\var{script_args}) has been parsed (and the data stored in the
\class{Distribution} instance)}
\lineii{run}{Stop after all commands have been run (the same as
if \function{setup()} had been called in the usual way). This is the default
value.}
\end{tableii}
\end{funcdesc}
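For example, assuming a \file{setup.py} exists in the current directory,
the distribution meta-data can be inspected without running any commands:
\begin{verbatim}
from distutils.core import run_setup

dist = run_setup('setup.py', stop_after='init')
name = dist.get_name()        # whatever name=... was passed to setup()
version = dist.get_version()
\end{verbatim}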
In addition, the \module{distutils.core} module exposes a number of
classes that live elsewhere.
\begin{itemize}
\item \class{Extension} from \refmodule{distutils.extension}
\item \class{Command} from \refmodule{distutils.cmd}
\item \class{Distribution} from \refmodule{distutils.dist}
\end{itemize}
A short description of each of these follows, but see the relevant
module for the full reference.
\begin{classdesc*}{Extension}
The \class{Extension} class describes a single C or \Cpp{} extension module in a
setup script. It accepts the following keyword arguments in its
constructor:
\begin{tableiii}{c|l|l}{argument name}{argument name}{value}{type}
\lineiii{name}{the full name of the extension, including any packages
--- ie. \emph{not} a filename or pathname, but a Python dotted name}{string}
\lineiii{sources}{list of source filenames, relative to the distribution
root (where the setup script lives), in \UNIX{} form (slash-separated) for
portability. Source files may be C, \Cpp, SWIG (.i), platform-specific
resource files, or whatever else is recognized by the \command{build_ext}
command as source for a Python extension.}{string}
\lineiii{include_dirs}{list of directories to search for C/\Cpp{} header
files (in \UNIX{} form for portability)}{string}
\lineiii{define_macros}{list of macros to define; each macro is defined
using a 2-tuple \code{(\var{name}, \var{value})}, where \var{value} is either
the string to define it to or \code{None} to define it without a particular
value (equivalent of \code{\#define FOO} in source or \programopt{-DFOO} on
the \UNIX{} C compiler command line)}{(string, string)
tuple or (\var{name}, \code{None})}
\lineiii{undef_macros}{list of macros to undefine explicitly}{string}
\lineiii{library_dirs}{list of directories to search for C/\Cpp{} libraries
at link time }{string}
\lineiii{libraries}{list of library names (not filenames or paths) to
link against }{string}
\lineiii{runtime_library_dirs}{list of directories to search for C/\Cpp{}
libraries at run time (for shared extensions, this is when the extension
is loaded)}{string}
\lineiii{extra_objects}{list of extra files to link with (eg. object
files not implied by 'sources', static library that must be explicitly
specified, binary resource files, etc.)}{string}
\lineiii{extra_compile_args}{any extra platform- and compiler-specific
information to use when compiling the source files in 'sources'. For
platforms and compilers where a command line makes sense, this is
typically a list of command-line arguments, but for other platforms it
could be anything.}{string}
\lineiii{extra_link_args}{any extra platform- and compiler-specific
information to use when linking object files together to create the
extension (or to create a new static Python interpreter). Similar
interpretation as for 'extra_compile_args'.}{string}
\lineiii{export_symbols}{list of symbols to be exported from a shared
extension. Not used on all platforms, and not generally necessary for
Python extensions, which typically export exactly one symbol: \code{init} +
extension_name. }{string}
\lineiii{depends}{list of files that the extension depends on }{string}
\lineiii{language}{extension language (i.e. \code{'c'}, \code{'c++'},
\code{'objc'}). Will be detected from the source extensions if not provided.
}{string}
\end{tableiii}
\end{classdesc*}
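For example, a hypothetical extension built from a single C source file,
with an extra include directory, one macro definition and one external
library, might be declared like this:
\begin{verbatim}
from distutils.core import setup, Extension

setup(name='foo',
      version='1.0',
      ext_modules=[Extension('foo',
                             sources=['foomodule.c'],
                             include_dirs=['include'],
                             define_macros=[('NDEBUG', None)],
                             libraries=['m'])])
\end{verbatim}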
\begin{classdesc*}{Distribution}
A \class{Distribution} describes how to build, install and package up a
Python software package.
See the \function{setup()} function for a list of keyword arguments accepted
by the Distribution constructor. \function{setup()} creates a Distribution
instance.
\end{classdesc*}
\begin{classdesc*}{Command}
A \class{Command} class (or rather, an instance of one of its subclasses)
implements a single distutils command.
\end{classdesc*}
\section{\module{distutils.ccompiler} --- CCompiler base class}
\declaremodule{standard}{distutils.ccompiler}
\modulesynopsis{Abstract CCompiler class}
This module provides the abstract base class for the \class{CCompiler}
classes. A \class{CCompiler} instance can be used for all the compile
and link steps needed to build a single project. Methods are provided to
set options for the compiler --- macro definitions, include directories,
link path, libraries and the like.
This module provides the following functions.
\begin{funcdesc}{gen_lib_options}{compiler, library_dirs, runtime_library_dirs, libraries}
Generate linker options for searching library directories and
linking with specific libraries. \var{libraries} and \var{library_dirs} are,
respectively, lists of library names (not filenames!) and search
directories. Returns a list of command-line options suitable for use
with some compiler (depending on the two format strings passed in).
\end{funcdesc}
\begin{funcdesc}{gen_preprocess_options}{macros, include_dirs}
Generate C pre-processor options (\programopt{-D}, \programopt{-U},
\programopt{-I}) as used by at least
two types of compilers: the typical \UNIX{} compiler and Visual \Cpp.
\var{macros} is the usual thing, a list of 1- or 2-tuples, where
\code{(\var{name},)} means undefine (\programopt{-U}) macro \var{name},
and \code{(\var{name}, \var{value})} means define (\programopt{-D})
macro \var{name} to \var{value}. \var{include_dirs} is just a list of
directory names to be added to the header file search path (\programopt{-I}).
Returns a list of command-line options suitable for either \UNIX{} compilers
or Visual \Cpp.
\end{funcdesc}
\begin{funcdesc}{get_default_compiler}{osname, platform}
Determine the default compiler to use for the given platform.
\var{osname} should be one of the standard Python OS names (i.e.\ the
ones returned by \code{os.name}) and \var{platform} the common value
returned by \code{sys.platform} for the platform in question.
The default values are \code{os.name} and \code{sys.platform} in case the
parameters are not given.
\end{funcdesc}
\begin{funcdesc}{new_compiler}{plat=\code{None}, compiler=\code{None}, verbose=\code{0}, dry_run=\code{0}, force=\code{0}}
Factory function to generate an instance of some CCompiler subclass
for the supplied platform/compiler combination. \var{plat} defaults
to \code{os.name} (eg. \code{'posix'}, \code{'nt'}), and \var{compiler}
defaults to the default compiler for that platform. Currently only
\code{'posix'} and \code{'nt'} are supported, and the default
compilers are ``traditional \UNIX{} interface'' (\class{UnixCCompiler}
class) and Visual \Cpp (\class{MSVCCompiler} class). Note that it's
perfectly possible to ask for a \UNIX{} compiler object under Windows,
and a Microsoft compiler object under \UNIX---if you supply a value
for \var{compiler}, \var{plat} is ignored.
% Is the posix/nt only thing still true? Mac OS X seems to work, and
% returns a UnixCCompiler instance. How to document this... hmm.
\end{funcdesc}
\begin{funcdesc}{show_compilers}{}
Print list of available compilers (used by the
\longprogramopt{help-compiler} options to \command{build},
\command{build_ext}, \command{build_clib}).
\end{funcdesc}
\begin{classdesc}{CCompiler}{\optional{verbose=\code{0}, dry_run=\code{0}, force=\code{0}}}
The abstract base class \class{CCompiler} defines the interface that
must be implemented by real compiler classes. The class also has
some utility methods used by several compiler classes.
The basic idea behind a compiler abstraction class is that each
instance can be used for all the compile/link steps in building a
single project. Thus, attributes common to all of those compile and
link steps --- include directories, macros to define, libraries to link
against, etc. --- are attributes of the compiler instance. To allow for
variability in how individual files are treated, most of those
attributes may be varied on a per-compilation or per-link basis.
The constructor for each subclass creates an instance of the Compiler
object. Flags are \var{verbose} (show verbose output), \var{dry_run}
(don't actually execute the steps) and \var{force} (rebuild
everything, regardless of dependencies). All of these flags default to
\code{0} (off). Note that you probably don't want to instantiate
\class{CCompiler} or one of its subclasses directly; use the
\function{distutils.ccompiler.new_compiler()} factory function
instead.
The following methods allow you to manually alter compiler options for
the instance of the Compiler class.
\begin{methoddesc}{add_include_dir}{dir}
Add \var{dir} to the list of directories that will be searched for
header files. The compiler is instructed to search directories in
the order in which they are supplied by successive calls to
\method{add_include_dir()}.
\end{methoddesc}
\begin{methoddesc}{set_include_dirs}{dirs}
Set the list of directories that will be searched to \var{dirs} (a
list of strings). Overrides any preceding calls to
\method{add_include_dir()}; subsequent calls to
\method{add_include_dir()} add to the list passed to
\method{set_include_dirs()}. This does not affect any list of
standard include directories that the compiler may search by default.
\end{methoddesc}
\begin{methoddesc}{add_library}{libname}
Add \var{libname} to the list of libraries that will be included in
all links driven by this compiler object. Note that \var{libname}
should \emph{not} be the name of a file containing a library, but the
name of the library itself: the actual filename will be inferred by
the linker, the compiler, or the compiler class (depending on the
platform).
The linker will be instructed to link against libraries in the
order they were supplied to \method{add_library()} and/or
\method{set_libraries()}. It is perfectly valid to duplicate library
names; the linker will be instructed to link against libraries as
many times as they are mentioned.
\end{methoddesc}
\begin{methoddesc}{set_libraries}{libnames}
Set the list of libraries to be included in all links driven by
this compiler object to \var{libnames} (a list of strings). This does
not affect any standard system libraries that the linker may
include by default.
\end{methoddesc}
\begin{methoddesc}{add_library_dir}{dir}
Add \var{dir} to the list of directories that will be searched for
libraries specified to \method{add_library()} and
\method{set_libraries()}. The linker will be instructed to search for
libraries in the order they are supplied to \method{add_library_dir()}
and/or \method{set_library_dirs()}.
\end{methoddesc}
\begin{methoddesc}{set_library_dirs}{dirs}
Set the list of library search directories to \var{dirs} (a list of
strings). This does not affect any standard library search path
that the linker may search by default.
\end{methoddesc}
\begin{methoddesc}{add_runtime_library_dir}{dir}
Add \var{dir} to the list of directories that will be searched for
shared libraries at runtime.
\end{methoddesc}
\begin{methoddesc}{set_runtime_library_dirs}{dirs}
Set the list of directories to search for shared libraries at
runtime to \var{dirs} (a list of strings). This does not affect any
standard search path that the runtime linker may search by
default.
\end{methoddesc}
\begin{methoddesc}{define_macro}{name\optional{, value=\code{None}}}
Define a preprocessor macro for all compilations driven by this
compiler object. The optional parameter \var{value} should be a
string; if it is not supplied, then the macro will be defined
without an explicit value and the exact outcome depends on the
compiler used (XXX true? does ANSI say anything about this?)
\end{methoddesc}
\begin{methoddesc}{undefine_macro}{name}
Undefine a preprocessor macro for all compilations driven by
this compiler object. If the same macro is defined by
\method{define_macro()} and undefined by \method{undefine_macro()}
the last call takes precedence (including multiple redefinitions or
undefinitions). If the macro is redefined/undefined on a
per-compilation basis (ie. in the call to \method{compile()}), then that
takes precedence.
\end{methoddesc}
\begin{methoddesc}{add_link_object}{object}
Add \var{object} to the list of object files (or analogues, such as
explicitly named library files or the output of ``resource
compilers'') to be included in every link driven by this compiler
object.
\end{methoddesc}
\begin{methoddesc}{set_link_objects}{objects}
Set the list of object files (or analogues) to be included in
every link to \var{objects}. This does not affect any standard object
files that the linker may include by default (such as system
libraries).
\end{methoddesc}
The following methods allow autodetection of compiler options,
providing some functionality similar to GNU \program{autoconf}.
\begin{methoddesc}{detect_language}{sources}
Detect the language of a given file, or list of files. Uses the
instance attributes \member{language_map} (a dictionary), and
\member{language_order} (a list) to do the job.
\end{methoddesc}
\begin{methoddesc}{find_library_file}{dirs, lib\optional{, debug=\code{0}}}
Search the specified list of directories for a static or shared
library file \var{lib} and return the full path to that file. If
\var{debug} is true, look for a debugging version (if that makes sense on
the current platform). Return \code{None} if \var{lib} wasn't found in any of
the specified directories.
\end{methoddesc}
\begin{methoddesc}{has_function}{funcname \optional{, includes=\code{None}, include_dirs=\code{None}, libraries=\code{None}, library_dirs=\code{None}}}
Return a boolean indicating whether \var{funcname} is supported on
the current platform. The optional arguments can be used to
augment the compilation environment by providing additional include
files and paths and libraries and paths.
\end{methoddesc}
\begin{methoddesc}{library_dir_option}{dir}
Return the compiler option to add \var{dir} to the list of
directories searched for libraries.
\end{methoddesc}
\begin{methoddesc}{library_option}{lib}
Return the compiler option to add \var{lib} to the list of libraries
linked into the shared library or executable.
\end{methoddesc}
\begin{methoddesc}{runtime_library_dir_option}{dir}
Return the compiler option to add \var{dir} to the list of
directories searched for runtime libraries.
\end{methoddesc}
\begin{methoddesc}{set_executables}{**args}
Define the executables (and options for them) that will be run
to perform the various stages of compilation. The exact set of
executables that may be specified here depends on the compiler
class (via the \member{executables} class attribute), but most will have:
\begin{tableii}{l|l}{attribute}{attribute}{description}
\lineii{compiler}{the C/\Cpp{} compiler}
\lineii{linker_so}{linker used to create shared objects and libraries}
\lineii{linker_exe}{linker used to create binary executables}
\lineii{archiver}{static library creator}
\end{tableii}
On platforms with a command-line (\UNIX, DOS/Windows), each of these
is a string that will be split into executable name and (optional)
list of arguments. (Splitting the string is done similarly to how
\UNIX{} shells operate: words are delimited by spaces, but quotes and
backslashes can override this. See
\function{distutils.util.split_quoted()}.)
\end{methoddesc}
The following methods invoke stages in the build process.
\begin{methoddesc}{compile}{sources\optional{, output_dir=\code{None}, macros=\code{None}, include_dirs=\code{None}, debug=\code{0}, extra_preargs=\code{None}, extra_postargs=\code{None}, depends=\code{None}}}
Compile one or more source files. Generates object files (e.g.
transforms a \file{.c} file to a \file{.o} file).
\var{sources} must be a list of filenames, most likely C/\Cpp
files, but in reality anything that can be handled by a
particular compiler and compiler class (eg. \class{MSVCCompiler} can
handle resource files in \var{sources}). Return a list of object
filenames, one per source filename in \var{sources}. Depending on
the implementation, not all source files will necessarily be
compiled, but all corresponding object filenames will be
returned.
If \var{output_dir} is given, object files will be put under it, while
retaining their original path component. That is, \file{foo/bar.c}
normally compiles to \file{foo/bar.o} (for a \UNIX{} implementation); if
\var{output_dir} is \file{build}, then it would compile to
\file{build/foo/bar.o}.
\var{macros}, if given, must be a list of macro definitions. A macro
definition is either a \code{(\var{name}, \var{value})} 2-tuple or a
\code{(\var{name},)} 1-tuple.
The former defines a macro; if the value is \code{None}, the macro is
defined without an explicit value. The 1-tuple case undefines a
macro. Later definitions/redefinitions/undefinitions take
precedence.
\var{include_dirs}, if given, must be a list of strings, the
directories to add to the default include file search path for this
compilation only.
\var{debug} is a boolean; if true, the compiler will be instructed to
output debug symbols in (or alongside) the object file(s).
\var{extra_preargs} and \var{extra_postargs} are implementation-dependent.
On platforms that have the notion of a command-line (e.g. \UNIX,
DOS/Windows), they are most likely lists of strings: extra
command-line arguments to prepend/append to the compiler command
line. On other platforms, consult the implementation class
documentation. In any event, they are intended as an escape hatch
for those occasions when the abstract compiler framework doesn't
cut the mustard.
\var{depends}, if given, is a list of filenames that all targets
depend on. If a source file is older than any file in
depends, then the source file will be recompiled. This
supports dependency tracking, but only at a coarse
granularity.
Raises \exception{CompileError} on failure.
\end{methoddesc}
\begin{methoddesc}{create_static_lib}{objects, output_libname\optional{, output_dir=\code{None}, debug=\code{0}, target_lang=\code{None}}}
Link a bunch of stuff together to create a static library file.
The ``bunch of stuff'' consists of the list of object files supplied
as \var{objects}, the extra object files supplied to
\method{add_link_object()} and/or \method{set_link_objects()}, the libraries
supplied to \method{add_library()} and/or \method{set_libraries()}, and the
libraries supplied as \var{libraries} (if any).
\var{output_libname} should be a library name, not a filename; the
filename will be inferred from the library name. \var{output_dir} is
the directory where the library file will be put. XXX defaults to what?
\var{debug} is a boolean; if true, debugging information will be
included in the library (note that on most platforms, it is the
compile step where this matters: the \var{debug} flag is included here
just for consistency).
\var{target_lang} is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises \exception{LibError} on failure.
\end{methoddesc}
\begin{methoddesc}{link}{target_desc, objects, output_filename\optional{, output_dir=\code{None}, libraries=\code{None}, library_dirs=\code{None}, runtime_library_dirs=\code{None}, export_symbols=\code{None}, debug=\code{0}, extra_preargs=\code{None}, extra_postargs=\code{None}, build_temp=\code{None}, target_lang=\code{None}}}
Link a bunch of stuff together to create an executable or
shared library file.
The ``bunch of stuff'' consists of the list of object files supplied
as \var{objects}. \var{output_filename} should be a filename. If
\var{output_dir} is supplied, \var{output_filename} is relative to it
(i.e. \var{output_filename} can provide directory components if
needed).
\var{libraries} is a list of libraries to link against. These are
library names, not filenames, since they're translated into
filenames in a platform-specific way (eg. \var{foo} becomes \file{libfoo.a}
on \UNIX{} and \file{foo.lib} on DOS/Windows). However, they can include a
directory component, which means the linker will look in that
specific directory rather than searching all the normal locations.
\var{library_dirs}, if supplied, should be a list of directories to
search for libraries that were specified as bare library names
(ie. no directory component). These are on top of the system
default and those supplied to \method{add_library_dir()} and/or
\method{set_library_dirs()}. \var{runtime_library_dirs} is a list of
directories that will be embedded into the shared library and used
to search for other shared libraries that \emph{it} depends on at
run-time. (This may only be relevant on \UNIX.)
\var{export_symbols} is a list of symbols that the shared library will
export. (This appears to be relevant only on Windows.)
\var{debug} is as for \method{compile()} and \method{create_static_lib()},
with the slight distinction that it actually matters on most platforms (as
opposed to \method{create_static_lib()}, which includes a \var{debug} flag
mostly for form's sake).
\var{extra_preargs} and \var{extra_postargs} are as for \method{compile()}
(except of course that they supply command-line arguments for the
particular linker being used).
\var{target_lang} is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises \exception{LinkError} on failure.
\end{methoddesc}
\begin{methoddesc}{link_executable}{objects, output_progname\optional{, output_dir=\code{None}, libraries=\code{None}, library_dirs=\code{None}, runtime_library_dirs=\code{None}, debug=\code{0}, extra_preargs=\code{None}, extra_postargs=\code{None}, target_lang=\code{None}}}
Link an executable.
\var{output_progname} is the name of the executable file,
while \var{objects} is a list of object filenames to link in. Other arguments
are as for the \method{link} method.
\end{methoddesc}
\begin{methoddesc}{link_shared_lib}{objects, output_libname\optional{, output_dir=\code{None}, libraries=\code{None}, library_dirs=\code{None}, runtime_library_dirs=\code{None}, export_symbols=\code{None}, debug=\code{0}, extra_preargs=\code{None}, extra_postargs=\code{None}, build_temp=\code{None}, target_lang=\code{None}}}
Link a shared library. \var{output_libname} is the name of the output
library, while \var{objects} is a list of object filenames to link in.
Other arguments are as for the \method{link} method.
\end{methoddesc}
\begin{methoddesc}{link_shared_object}{objects, output_filename\optional{, output_dir=\code{None}, libraries=\code{None}, library_dirs=\code{None}, runtime_library_dirs=\code{None}, export_symbols=\code{None}, debug=\code{0}, extra_preargs=\code{None}, extra_postargs=\code{None}, build_temp=\code{None}, target_lang=\code{None}}}
Link a shared object. \var{output_filename} is the name of the shared object
that will be created, while \var{objects} is a list of object filenames
to link in. Other arguments are as for the \method{link} method.
\end{methoddesc}
\begin{methoddesc}{preprocess}{source\optional{, output_file=\code{None}, macros=\code{None}, include_dirs=\code{None}, extra_preargs=\code{None}, extra_postargs=\code{None}}}
Preprocess a single C/\Cpp{} source file, named in \var{source}.
Output will be written to the file named \var{output_file}, or to standard
output if \var{output_file} is not supplied. \var{macros} is a list of macro
definitions as for \method{compile()}, which will augment the macros set
with \method{define_macro()} and \method{undefine_macro()}.
\var{include_dirs} is a list of directory names that will be added to the
default list, in the same way as \method{add_include_dir()}.
Raises \exception{PreprocessError} on failure.
\end{methoddesc}
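To tie these stages together, here is a rough, \UNIX-style sketch
(the source file names, the \file{build} directory and the \file{.so}
suffix are all assumptions) that compiles two C files and links them into
a shared object with whatever compiler \function{new_compiler()} selects:
\begin{verbatim}
from distutils.ccompiler import new_compiler

cc = new_compiler(verbose=1)
cc.add_include_dir('include')
cc.define_macro('NDEBUG')

# Compile the (hypothetical) sources into build/, then link them.
objects = cc.compile(['spammodule.c', 'helpers.c'], output_dir='build')
cc.link_shared_object(objects, 'spam.so',
                      output_dir='build',
                      libraries=['m'])
\end{verbatim}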
The following utility methods are defined by the \class{CCompiler} class,
for use by the various concrete subclasses.
\begin{methoddesc}{executable_filename}{basename\optional{, strip_dir=\code{0}, output_dir=\code{''}}}
Returns the filename of the executable for the given \var{basename}.
Typically, on non-Windows platforms this is the same as the basename,
while on Windows a \file{.exe} extension is appended.
\end{methoddesc}
\begin{methoddesc}{library_filename}{libname\optional{, lib_type=\code{'static'}, strip_dir=\code{0}, output_dir=\code{''}}}
Returns the filename for the given library name on the current platform.
On \UNIX{} a library with \var{lib_type} of \code{'static'} will typically
be of the form \file{lib\var{libname}.a}, while a \var{lib_type} of
\code{'dynamic'} will be of the form \file{lib\var{libname}.so}.
\end{methoddesc}
\begin{methoddesc}{object_filenames}{source_filenames\optional{, strip_dir=\code{0}, output_dir=\code{''}}}
Returns the names of the object files for the given source files.
\var{source_filenames} should be a list of filenames.
\end{methoddesc}
\begin{methoddesc}{shared_object_filename}{basename\optional{, strip_dir=\code{0}, output_dir=\code{''}}}
Returns the name of a shared object file for the given file name \var{basename}.
\end{methoddesc}
\begin{methoddesc}{execute}{func, args\optional{, msg=\code{None}, level=\code{1}}}
Invokes \function{distutils.util.execute()}. This method invokes a
Python function \var{func} with the given arguments \var{args}, after
logging and taking into account the \var{dry_run} flag. XXX see also.
\end{methoddesc}
\begin{methoddesc}{spawn}{cmd}
Invokes \function{distutils.util.spawn()}. This invokes an external
process to run the given command. XXX see also.
\end{methoddesc}
\begin{methoddesc}{mkpath}{name\optional{, mode=\code{511}}}
Invokes \function{distutils.dir_util.mkpath()}. This creates a directory
and any missing ancestor directories. XXX see also.
\end{methoddesc}
\begin{methoddesc}{move_file}{src, dst}
Invokes \function{distutils.file_util.move_file()}. Renames \var{src} to
\var{dst}. XXX see also.
\end{methoddesc}
\begin{methoddesc}{announce}{msg\optional{, level=\code{1}}}
Write a message using \function{distutils.log.debug()}. XXX see also.
\end{methoddesc}
\begin{methoddesc}{warn}{msg}
Write a warning message \var{msg} to standard error.
\end{methoddesc}
\begin{methoddesc}{debug_print}{msg}
If the \var{debug} flag is set on this \class{CCompiler} instance, print
\var{msg} to standard output, otherwise do nothing.
\end{methoddesc}
\end{classdesc}
%\subsection{Compiler-specific modules}
%
%The following modules implement concrete subclasses of the abstract
%\class{CCompiler} class. They should not be instantiated directly, but should
%be created using \function{distutils.ccompiler.new_compiler()} factory
%function.
\section{\module{distutils.unixccompiler} --- Unix C Compiler}
\declaremodule{standard}{distutils.unixccompiler}
\modulesynopsis{UNIX C Compiler}
This module provides the \class{UnixCCompiler} class, a subclass of
\class{CCompiler} that handles the typical \UNIX-style command-line
C compiler:
\begin{itemize}
\item macros defined with \programopt{-D\var{name}\optional{=value}}
\item macros undefined with \programopt{-U\var{name}}
\item include search directories specified with
\programopt{-I\var{dir}}
\item libraries specified with \programopt{-l\var{lib}}
\item library search directories specified with \programopt{-L\var{dir}}
\item compile handled by \program{cc} (or similar) executable with
\programopt{-c} option: compiles \file{.c} to \file{.o}
\item link static library handled by \program{ar} command (possibly
with \program{ranlib})
\item link shared library handled by \program{cc} \programopt{-shared}
\end{itemize}
\section{\module{distutils.msvccompiler} --- Microsoft Compiler}
\declaremodule{standard}{distutils.msvccompiler}
\modulesynopsis{Microsoft Compiler}
This module provides \class{MSVCCompiler}, an implementation of the abstract
\class{CCompiler} class for Microsoft Visual Studio. Typically, extension
modules need to be compiled with the same compiler that was used to compile
Python. For Python 2.3 and earlier, the compiler was Visual Studio 6. For
Python 2.4 and 2.5, the compiler is Visual Studio .NET 2003. The AMD64
and Itanium binaries are created using the Platform SDK.
\class{MSVCCompiler} will normally choose the right compiler, linker etc.
on its own. To override this choice, the environment variables
\var{DISTUTILS\_USE\_SDK} and \var{MSSdk} must both be set. \var{MSSdk}
indicates that the current environment has been set up by the SDK's
\code{SetEnv.Cmd} script, or that the environment variables have been
registered when the SDK was installed; \var{DISTUTILS\_USE\_SDK} indicates
that the distutils user has made an explicit choice to override the
compiler selection by \class{MSVCCompiler}.
\section{\module{distutils.bcppcompiler} --- Borland Compiler}
\declaremodule{standard}{distutils.bcppcompiler}
This module provides \class{BorlandCCompiler}, a subclass of the abstract \class{CCompiler} class for the Borland \Cpp{} compiler.
\section{\module{distutils.cygwinccompiler} --- Cygwin Compiler}
\declaremodule{standard}{distutils.cygwinccompiler}
This module provides the \class{CygwinCCompiler} class, a subclass of \class{UnixCCompiler} that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the \class{Mingw32CCompiler} class, which handles the MinGW (mingw32) port of
GCC (the same as Cygwin in no-cygwin mode).
\section{\module{distutils.emxccompiler} --- OS/2 EMX Compiler}
\declaremodule{standard}{distutils.emxccompiler}
\modulesynopsis{OS/2 EMX Compiler support}
This module provides the \class{EMXCCompiler} class, a subclass of \class{UnixCCompiler} that handles the EMX port of the GNU C compiler to OS/2.
\section{\module{distutils.mwerkscompiler} --- Metrowerks CodeWarrior support}
\declaremodule{standard}{distutils.mwerkscompiler}
\modulesynopsis{Metrowerks CodeWarrior support}
Contains \class{MWerksCompiler}, an implementation of the abstract
\class{CCompiler} class for Metrowerks CodeWarrior on the pre-Mac OS X Macintosh.
Needs work to support CW on Windows or Mac OS X.
%\subsection{Utility modules}
%
%The following modules all provide general utility functions. They haven't
%all been documented yet.
\section{\module{distutils.archive_util} ---
Archiving utilities}
\declaremodule[distutils.archiveutil]{standard}{distutils.archive_util}
\modulesynopsis{Utility functions for creating archive files (tarballs, zip files, ...)}
This module provides a few functions for creating archive files, such as
tarballs or zipfiles.
\begin{funcdesc}{make_archive}{base_name, format\optional{, root_dir=\code{None}, base_dir=\code{None}, verbose=\code{0}, dry_run=\code{0}}}
Create an archive file (eg. \code{zip} or \code{tar}). \var{base_name}
is the name of the file to create, minus any format-specific extension;
\var{format} is the archive format: one of \code{zip}, \code{tar},
\code{ztar}, or \code{gztar}.
\var{root_dir} is a directory that will be the root directory of the
archive; ie. we typically \code{chdir} into \var{root_dir} before
creating the archive. \var{base_dir} is the directory where we start
archiving from; ie. \var{base_dir} will be the common prefix of all files and
directories in the archive. \var{root_dir} and \var{base_dir} both default
to the current directory. Returns the name of the archive file.
\warning{This should be changed to support bz2 files}
\end{funcdesc}
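For instance, the following sketch (the paths are hypothetical) creates
\file{dist/Example-1.0.tar.gz} containing everything under the
\file{Example-1.0} directory:
\begin{verbatim}
from distutils.archive_util import make_archive

archive = make_archive('dist/Example-1.0', 'gztar',
                       root_dir='.', base_dir='Example-1.0')
\end{verbatim}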
\begin{funcdesc}{make_tarball}{base_name, base_dir\optional{, compress=\code{'gzip'}, verbose=\code{0}, dry_run=\code{0}}}
Create an (optionally compressed) archive as a tar file from all files in
and under \var{base_dir}. \var{compress} must be \code{'gzip'} (the default),
\code{'compress'}, \code{'bzip2'}, or \code{None}. Both \program{tar}
and the compression utility named by \var{compress} must be on the
default program search path, so this is probably \UNIX-specific. The
output tar file will be named \file{\var{base_dir}.tar}, possibly plus
the appropriate compression extension (\file{.gz}, \file{.bz2} or
\file{.Z}). Return the output filename.
\warning{This should be replaced with calls to the \module{tarfile} module.}
\end{funcdesc}
\begin{funcdesc}{make_zipfile}{base_name, base_dir\optional{, verbose=\code{0}, dry_run=\code{0}}}
Create a zip file from all files in and under \var{base_dir}. The output
zip file will be named \var{base_dir} + \file{.zip}. Uses either the
\module{zipfile} Python module (if available) or the InfoZIP \file{zip}
utility (if installed and found on the default search path). If neither
tool is available, raises \exception{DistutilsExecError}.
Returns the name of the output zip file.
\end{funcdesc}
\section{\module{distutils.dep_util} --- Dependency checking}
\declaremodule[distutils.deputil]{standard}{distutils.dep_util}
\modulesynopsis{Utility functions for simple dependency checking}
This module provides functions for performing simple, timestamp-based
dependency checking on files and groups of files, as well as functions
based entirely on such timestamp dependency analysis.
\begin{funcdesc}{newer}{source, target}
Return true if \var{source} exists and is more recently modified than
\var{target}, or if \var{source} exists and \var{target} doesn't.
Return false if both exist and \var{target} is the same age or newer
than \var{source}.
Raise \exception{DistutilsFileError} if \var{source} does not exist.
\end{funcdesc}
\begin{funcdesc}{newer_pairwise}{sources, targets}
Walk two filename lists in parallel, testing if each source is newer
than its corresponding target. Return a pair of lists (\var{sources},
\var{targets}) containing only the pairs where the source is newer than the
target, according to the semantics of \function{newer()}.
%% equivalent to a listcomp...
\end{funcdesc}
\begin{funcdesc}{newer_group}{sources, target\optional{, missing=\code{'error'}}}
Return true if \var{target} is out-of-date with respect to any file
listed in \var{sources}. In other words, if \var{target} exists and is newer
than every file in \var{sources}, return false; otherwise return true.
\var{missing} controls what we do when a source file is missing; the
default (\code{'error'}) is to blow up with an \exception{OSError} from
inside \function{os.stat()};
if it is \code{'ignore'}, we silently drop any missing source files; if it is
\code{'newer'}, any missing source files make us assume that \var{target} is
out-of-date (this is handy in ``dry-run'' mode: it'll make you pretend to
carry out commands that wouldn't work because inputs are missing, but
that doesn't matter because you're not actually going to run the
commands).
\end{funcdesc}
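A small sketch of how these checks are typically used (the file names are
hypothetical):
\begin{verbatim}
from distutils.dep_util import newer, newer_group

if newer('foo.c', 'foo.o'):
    pass        # foo.o is missing or out of date: recompile it here

if newer_group(['foo.c', 'foo.h', 'bar.h'], 'foo.o', missing='newer'):
    pass        # same idea, but against a whole group of sources
\end{verbatim}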
\section{\module{distutils.dir_util} --- Directory tree operations}
\declaremodule[distutils.dirutil]{standard}{distutils.dir_util}
\modulesynopsis{Utility functions for operating on directories and directory trees}
This module provides functions for operating on directories and trees
of directories.
\begin{funcdesc}{mkpath}{name\optional{, mode=\code{0777}, verbose=\code{0}, dry_run=\code{0}}}
Create a directory and any missing ancestor directories. If the
directory already exists (or if \var{name} is the empty string, which
means the current directory, which of course exists), then do
nothing. Raise \exception{DistutilsFileError} if unable to create some
directory along the way (eg. some sub-path exists, but is a file
rather than a directory). If \var{verbose} is true, print a one-line
summary of each mkdir to stdout. Return the list of directories
actually created.
\end{funcdesc}
\begin{funcdesc}{create_tree}{base_dir, files\optional{, mode=\code{0777}, verbose=\code{0}, dry_run=\code{0}}}
Create all the empty directories under \var{base_dir} needed to
put \var{files} there. \var{base_dir} is just the name of a directory
which doesn't necessarily exist yet; \var{files} is a list of filenames
to be interpreted relative to \var{base_dir}. \var{base_dir} + the
directory portion of every file in \var{files} will be created if it
doesn't already exist. \var{mode}, \var{verbose} and \var{dry_run} flags
are as for \function{mkpath()}.
\end{funcdesc}
\begin{funcdesc}{copy_tree}{src, dst\optional{, preserve_mode=\code{1}, preserve_times=\code{1}, preserve_symlinks=\code{0}, update=\code{0}, verbose=\code{0}, dry_run=\code{0}}}
Copy an entire directory tree \var{src} to a new location \var{dst}. Both
\var{src} and \var{dst} must be directory names. If \var{src} is not a
directory, raise \exception{DistutilsFileError}. If \var{dst} does
not exist, it is created with \function{mkpath()}. The end result of the
copy is that every file in \var{src} is copied to \var{dst}, and
directories under \var{src} are recursively copied to \var{dst}.
Return the list of files that were copied or might have been copied,
using their output name. The return value is unaffected by \var{update}
or \var{dry_run}: it is simply the list of all files under \var{src},
with the names changed to be under \var{dst}.
\var{preserve_mode} and \var{preserve_times} are the same as for
\function{copy_file} in \refmodule[distutils.fileutil]{distutils.file_util};
note that they only apply to regular files, not to directories. If
\var{preserve_symlinks} is true, symlinks will be copied as symlinks
(on platforms that support them!); otherwise (the default), the
destination of the symlink will be copied. \var{update} and
\var{verbose} are the same as for
\function{copy_file()}.
\end{funcdesc}
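A short, hypothetical example of the two most commonly used functions in
this module:
\begin{verbatim}
from distutils.dir_util import mkpath, copy_tree

mkpath('build/lib/foo')     # creates any missing ancestor directories too
copied = copy_tree('data', 'build/lib/foo/data', update=1)
\end{verbatim}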
\begin{funcdesc}{remove_tree}{directory\optional{, verbose=\code{0}, dry_run=\code{0}}}
Recursively remove \var{directory} and all files and directories underneath
it. Any errors are ignored (apart from being reported to \code{sys.stdout} if
\var{verbose} is true).
\end{funcdesc}
\XXX{Some of this could be replaced with the shutil module?}
\section{\module{distutils.file_util} --- Single file operations}
\declaremodule[distutils.fileutil]{standard}{distutils.file_util}
\modulesynopsis{Utility functions for operating on single files}
This module contains some utility functions for operating on individual files.
\begin{funcdesc}{copy_file}{src, dst\optional{, preserve_mode=\code{1}, preserve_times=\code{1}, update=\code{0}, link=\code{None}, verbose=\code{0}, dry_run=\code{0}}}
Copy file \var{src} to \var{dst}. If \var{dst} is a directory, then
\var{src} is copied there with the same name; otherwise, it must be a
filename. (If the file exists, it will be ruthlessly clobbered.) If
\var{preserve_mode} is true (the default), the file's mode (type and
permission bits, or whatever is analogous on the current platform) is
copied. If \var{preserve_times} is true (the default), the last-modified
and last-access times are copied as well. If \var{update} is true,
\var{src} will only be copied if \var{dst} does not exist, or if
\var{dst} does exist but is older than \var{src}.
\var{link} allows you to make hard links (using \function{os.link}) or
symbolic links (using \function{os.symlink}) instead of copying: set it
to \code{'hard'} or \code{'sym'}; if it is \code{None} (the default),
files are copied. Don't set \var{link} on systems that don't support
it: \function{copy_file()} doesn't check if hard or symbolic linking is
available. It uses \function{_copy_file_contents()} to copy file contents.
Return a tuple \samp{(dest_name, copied)}: \var{dest_name} is the actual
name of the output file, and \var{copied} is true if the file was copied
(or would have been copied, if \var{dry_run} true).
% XXX if the destination file already exists, we clobber it if
% copying, but blow up if linking. Hmmm. And I don't know what
% macostools.copyfile() does. Should definitely be consistent, and
% should probably blow up if destination exists and we would be
% changing it (ie. it's not already a hard/soft link to src OR
% (not update) and (src newer than dst)).
\end{funcdesc}
\begin{funcdesc}{move_file}{src, dst\optional{, verbose, dry_run}}
Move file \var{src} to \var{dst}. If \var{dst} is a directory, the file will
be moved into it with the same name; otherwise, \var{src} is just renamed
to \var{dst}. Returns the new full name of the file.
\warning{Handles cross-device moves on \UNIX{} using \function{copy_file()}.
What about other systems???}
\end{funcdesc}
\begin{funcdesc}{write_file}{filename, contents}
Create a file called \var{filename} and write \var{contents} (a
sequence of strings without line terminators) to it.
\end{funcdesc}
\section{\module{distutils.util} --- Miscellaneous other utility functions}
\declaremodule{standard}{distutils.util}
\modulesynopsis{Miscellaneous other utility functions}
This module contains other assorted bits and pieces that don't fit into
any other utility module.
\begin{funcdesc}{get_platform}{}
Return a string that identifies the current platform. This is used
mainly to distinguish platform-specific build directories and
platform-specific built distributions. Typically includes the OS name
and version and the architecture (as supplied by \function{os.uname()}),
although the exact information included depends on the OS; eg. for IRIX
the architecture isn't particularly important (IRIX only runs on SGI
hardware), but for Linux the kernel version isn't particularly
important.
Examples of returned values:
\begin{itemize}
\item \code{linux-i586}
\item \code{linux-alpha}
\item \code{solaris-2.6-sun4u}
\item \code{irix-5.3}
\item \code{irix64-6.2}
\end{itemize}
For non-\POSIX{} platforms, currently just returns \code{sys.platform}.
% XXX isn't this also provided by some other non-distutils module?
\end{funcdesc}
\begin{funcdesc}{convert_path}{pathname}
Return \var{pathname} as a name that will work on the native filesystem,
i.e. split it on \character{/} and put it back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in \UNIX{} style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
\exception{ValueError} on non-\UNIX-ish systems if \var{pathname} either
starts or ends with a slash.
\end{funcdesc}
\begin{funcdesc}{change_root}{new_root, pathname}
Return \var{pathname} with \var{new_root} prepended. If \var{pathname} is
relative, this is equivalent to \samp{os.path.join(new_root, pathname)}.
Otherwise, it requires making \var{pathname} relative and then joining the
two, which is tricky on DOS/Windows.
\end{funcdesc}
\begin{funcdesc}{check_environ}{}
Ensure that \code{os.environ} has all the environment variables we
guarantee that users can use in config files, command-line options,
etc. Currently this includes:
\begin{itemize}
\item \envvar{HOME} - user's home directory (\UNIX{} only)
\item \envvar{PLAT} - description of the current platform, including
hardware and OS (see \function{get_platform()})
\end{itemize}
\end{funcdesc}
\begin{funcdesc}{subst_vars}{s, local_vars}
Perform shell/Perl-style variable substitution on \var{s}. Every
occurrence of \code{\$} followed by a name is considered a variable, and
the variable is substituted by the value found in the \var{local_vars}
dictionary, or in \code{os.environ} if it's not in \var{local_vars}.
\code{os.environ} is first checked/augmented to guarantee that it contains
certain values: see \function{check_environ()}. Raise \exception{ValueError}
for any variables not found in either \var{local_vars} or \code{os.environ}.
Note that this is not a fully-fledged string interpolation function. A
valid \code{\$variable} can consist only of upper- and lower-case letters,
digits, and underscores. No \{ \} or \( \) style quoting is available.
\end{funcdesc}
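For example (the variable name and value are made up):
\begin{verbatim}
from distutils.util import subst_vars

build_dir = subst_vars('build/lib.$plat', {'plat': 'linux-i586'})
# build_dir is now 'build/lib.linux-i586'
\end{verbatim}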
\begin{funcdesc}{grok_environment_error}{exc\optional{, prefix=\samp{'error: '}}}
Generate a useful error message from an \exception{EnvironmentError}
(\exception{IOError} or \exception{OSError}) exception object.
Handles Python 1.5.1 and later styles, and does what it can to deal with
exception objects that don't have a filename (which happens when the error
is due to a two-file operation, such as \function{rename()} or
\function{link()}). Returns the error message as a string prefixed
with \var{prefix}.
\end{funcdesc}
\begin{funcdesc}{split_quoted}{s}
Split a string up according to \UNIX{} shell-like rules for quotes and
backslashes. In short: words are delimited by spaces, as long as those
spaces are not escaped by a backslash, or inside a quoted string.
Single and double quotes are equivalent, and the quote characters can
be backslash-escaped. The backslash is stripped from any two-character
escape sequence, leaving only the escaped character. The quote
characters are stripped from any quoted string. Returns a list of
words.
% Should probably be moved into the standard library.
\end{funcdesc}
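For example:
\begin{verbatim}
from distutils.util import split_quoted

words = split_quoted('cc -c "my file.c" -o "my file.o"')
# words == ['cc', '-c', 'my file.c', '-o', 'my file.o']
\end{verbatim}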
\begin{funcdesc}{execute}{func, args\optional{, msg=\code{None}, verbose=\code{0}, dry_run=\code{0}}}
Perform some action that affects the outside world (for instance,
writing to the filesystem). Such actions are special because they
are disabled by the \var{dry_run} flag. This method takes
care of all that bureaucracy for you; all you have to do is supply the
function to call and an argument tuple for it (to embody the
``external action'' being performed), and an optional message to
print.
\end{funcdesc}
\begin{funcdesc}{strtobool}{val}
Convert a string representation of truth to true (1) or false (0).
True values are \code{y}, \code{yes}, \code{t}, \code{true}, \code{on}
and \code{1}; false values are \code{n}, \code{no}, \code{f}, \code{false},
\code{off} and \code{0}. Raises \exception{ValueError} if \var{val}
is anything else.
\end{funcdesc}
\begin{funcdesc}{byte_compile}{py_files\optional{,
optimize=\code{0}, force=\code{0},
prefix=\code{None}, base_dir=\code{None},
verbose=\code{1}, dry_run=\code{0},
direct=\code{None}}}
Byte-compile a collection of Python source files to either \file{.pyc}
or \file{.pyo} files in the same directory. \var{py_files} is a list of files
to compile; any files that don't end in \file{.py} are silently skipped.
\var{optimize} must be one of the following:
\begin{itemize}
\item \code{0} - don't optimize (generate \file{.pyc})
\item \code{1} - normal optimization (like \samp{python -O})
\item \code{2} - extra optimization (like \samp{python -OO})
\end{itemize}
If \var{force} is true, all files are recompiled regardless of
timestamps.
The source filename encoded in each bytecode file defaults to the
filenames listed in \var{py_files}; you can modify these with \var{prefix} and
\var{base_dir}. \var{prefix} is a string that will be stripped off of each
source filename, and \var{base_dir} is a directory name that will be
prepended (after \var{prefix} is stripped). You can supply either or both
(or neither) of \var{prefix} and \var{base_dir}, as you wish.
If \var{dry_run} is true, doesn't actually do anything that would
affect the filesystem.
Byte-compilation is either done directly in this interpreter process
with the standard \module{py_compile} module, or indirectly by writing a
temporary script and executing it. Normally, you should let
\function{byte_compile()} figure out whether to use direct compilation or not
(see the source for details). The \var{direct} flag is used by the script
generated in indirect mode; unless you know what you're doing, leave
it set to \code{None}.
\end{funcdesc}
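A hypothetical sketch: byte-compile two modules that were copied into a
\file{build/lib} tree, recording source paths without the build prefix:
\begin{verbatim}
from distutils.util import byte_compile

byte_compile(['build/lib/foo/__init__.py', 'build/lib/foo/bar.py'],
             optimize=0,
             prefix='build/lib/')
\end{verbatim}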
\begin{funcdesc}{rfc822_escape}{header}
Return a version of \var{header} escaped for inclusion in an
\rfc{822} header, by ensuring there are eight spaces after each newline.
Note that it does no other modification of the string.
% this _can_ be replaced
\end{funcdesc}
%\subsection{Distutils objects}
\section{\module{distutils.dist} --- The Distribution class}
\declaremodule{standard}{distutils.dist}
\modulesynopsis{Provides the Distribution class, which represents the
module distribution being built/installed/distributed}
This module provides the \class{Distribution} class, which represents
the module distribution being built/installed/distributed.
\section{\module{distutils.extension} --- The Extension class}
\declaremodule{standard}{distutils.extension}
\modulesynopsis{Provides the Extension class, used to describe
C/\Cpp{} extension modules in setup scripts}
This module provides the \class{Extension} class, used to describe
C/\Cpp{} extension modules in setup scripts.
%\subsection{Ungrouped modules}
%The following haven't been moved into a more appropriate section yet.
\section{\module{distutils.debug} --- Distutils debug mode}
\declaremodule{standard}{distutils.debug}
\modulesynopsis{Provides the debug flag for distutils}
This module provides the \code{DEBUG} flag.
\section{\module{distutils.errors} --- Distutils exceptions}
\declaremodule{standard}{distutils.errors}
\modulesynopsis{Provides standard distutils exceptions}
Provides exceptions used by the Distutils modules. Note that Distutils
modules may raise standard exceptions; in particular, \exception{SystemExit} is
usually raised for errors that are obviously the end-user's fault
(eg. bad command-line arguments).
This module is safe to use in \samp{from ... import *} mode; it only exports
symbols whose names start with \code{Distutils} and end with \code{Error}.
\section{\module{distutils.fancy_getopt}
--- Wrapper around the standard getopt module}
\declaremodule[distutils.fancygetopt]{standard}{distutils.fancy_getopt}
\modulesynopsis{Additional \module{getopt} functionality}
This module provides a wrapper around the standard \module{getopt}
module that provides the following additional features:
\begin{itemize}
\item short and long options are tied together
\item options have help strings, so \function{fancy_getopt} could potentially
create a complete usage summary
\item options set attributes of a passed-in object
\item boolean options can have ``negative aliases'' --- eg. if
\longprogramopt{quiet} is the ``negative alias'' of
\longprogramopt{verbose}, then \longprogramopt{quiet} on the command
line sets \var{verbose} to false.
\end{itemize}
\XXX{Should be replaced with \module{optik} (which is also now
known as \module{optparse} in Python 2.3 and later).}
\begin{funcdesc}{fancy_getopt}{options, negative_opt, object, args}
Wrapper function. \var{options} is a list of
\samp{(long_option, short_option, help_string)} 3-tuples as described in the
constructor for \class{FancyGetopt}. \var{negative_opt} should be a dictionary
mapping option names to option names; both the key and the value should be in
the \var{options} list. \var{object} is an object which will be used to store
values (see the \method{getopt()} method of the \class{FancyGetopt} class).
\var{args} is the argument list. Will use \code{sys.argv[1:]} if you
pass \code{None} as \var{args}.
\end{funcdesc}
\begin{funcdesc}{wrap_text}{text, width}
Wraps \var{text} so that each line is less than \var{width} characters wide.
\warning{Should be replaced with \module{textwrap} (which is available
in Python 2.3 and later).}
\end{funcdesc}
\begin{classdesc}{FancyGetopt}{\optional{option_table=\code{None}}}
The \var{option_table} is a list of 3-tuples: \samp{(long_option,
short_option, help_string)}.
If an option takes an argument, its \var{long_option} should have \code{'='}
appended; \var{short_option} should just be a single character, no \code{':'}
in any case. \var{short_option} should be \code{None} if a \var{long_option}
doesn't have a corresponding \var{short_option}. All option tuples must have
long options.
\end{classdesc}
The \class{FancyGetopt} class provides the following methods:
\begin{methoddesc}{getopt}{\optional{args=\code{None}, object=\code{None}}}
Parse command-line options in \var{args}. Store as attributes on \var{object}.
If \var{args} is \code{None} or not supplied, uses \code{sys.argv[1:]}. If
\var{object} is \code{None} or not supplied, creates a new \class{OptionDummy}
instance, stores option values there, and returns a tuple \samp{(args,
object)}. If \var{object} is supplied, it is modified in place and
\function{getopt()} just returns \var{args}; in both cases, the returned
\var{args} is a modified copy of the passed-in \var{args} list, which
is left untouched.
% and args returned are?
\end{methoddesc}
\begin{methoddesc}{get_option_order}{}
Returns the list of \samp{(option, value)} tuples processed by the
previous run of \method{getopt()}. Raises \exception{RuntimeError} if
\method{getopt()} hasn't been called yet.
\end{methoddesc}
\begin{methoddesc}{generate_help}{\optional{header=\code{None}}}
Generate help text (a list of strings, one per suggested line of
output) from the option table for this \class{FancyGetopt} object.
If supplied, prints the supplied \var{header} at the top of the help.
\end{methoddesc}
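As a small, self-contained sketch (the option names are invented):
\begin{verbatim}
from distutils.fancy_getopt import FancyGetopt

parser = FancyGetopt([('verbose', 'v', "run verbosely"),
                      ('name=',   'n', "name to greet")])
args, opts = parser.getopt(['-v', '--name', 'World', 'leftover'])
# args == ['leftover']; opts.verbose and opts.name hold the parsed values
\end{verbatim}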
\section{\module{distutils.filelist} --- The FileList class}
\declaremodule{standard}{distutils.filelist}
\modulesynopsis{The \class{FileList} class, used for poking about the
file system and building lists of files.}
This module provides the \class{FileList} class, used for poking about
the filesystem and building lists of files.
\section{\module{distutils.log} --- Simple PEP 282-style logging}
\declaremodule{standard}{distutils.log}
\modulesynopsis{A simple logging mechanism, \pep{282}-style}
\warning{Should be replaced with standard \module{logging} module.}
%\subsubsection{\module{} --- }
%\declaremodule{standard}{distutils.magic}
%\modulesynopsis{ }
\section{\module{distutils.spawn} --- Spawn a sub-process}
\declaremodule{standard}{distutils.spawn}
\modulesynopsis{Provides the spawn() function}
This module provides the \function{spawn()} function, a front-end to
various platform-specific functions for launching another program in a
sub-process.
Also provides \function{find_executable()} to search the path for a given
executable name.
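A brief, illustrative sketch (the command run here is arbitrary):
\begin{verbatim}
from distutils.spawn import spawn, find_executable

print find_executable('ls')   # a full path such as '/bin/ls', or None
spawn(['ls', '-l'])           # run the command; raises DistutilsExecError
                              # if it cannot be run or exits with an error
\end{verbatim}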
\input{sysconfig}
\section{\module{distutils.text_file} --- The TextFile class}
\declaremodule[distutils.textfile]{standard}{distutils.text_file}
\modulesynopsis{provides the TextFile class, a simple interface to text files}
This module provides the \class{TextFile} class, which gives an interface
to text files that (optionally) takes care of stripping comments, ignoring
blank lines, and joining lines with backslashes.
\begin{classdesc}{TextFile}{\optional{filename=\code{None}, file=\code{None}, **options}}
This class provides a file-like object that takes care of all
the things you commonly want to do when processing a text file
that has some line-by-line syntax: strip comments (as long as \code{\#}
is your comment character), skip blank lines, join adjacent lines by
escaping the newline (i.e.\ a backslash at the end of a line), and strip
leading and/or trailing whitespace. All of these are optional
and independently controllable.
The class provides a \method{warn()} method so you can generate
warning messages that report physical line number, even if the
logical line in question spans multiple physical lines. Also
provides \method{unreadline()} for implementing line-at-a-time lookahead.
\class{TextFile} instances are created with either \var{filename}, \var{file},
or both. \exception{RuntimeError} is raised if both are \code{None}.
\var{filename} should be a string, and \var{file} a file object (or
something that provides \method{readline()} and \method{close()}
methods). It is recommended that you supply at least \var{filename},
so that \class{TextFile} can include it in warning messages. If
\var{file} is not supplied, \class{TextFile} creates its own using the
\function{open()} built-in function.
The options are all boolean, and affect the values returned by
\method{readline()}.
\begin{tableiii}{c|l|l}{option name}{option name}{description}{default}
\lineiii{strip_comments}{
strip from \character{\#} to end-of-line, as well as any whitespace
leading up to the \character{\#}---unless it is escaped by a backslash}
{true}
\lineiii{lstrip_ws}{
strip leading whitespace from each line before returning it}
{false}
\lineiii{rstrip_ws}{
strip trailing whitespace (including line terminator!) from
each line before returning it.}
{true}
\lineiii{skip_blanks}{
skip lines that are empty \emph{after} stripping comments and
whitespace. (If both \var{lstrip_ws} and \var{rstrip_ws} are false,
then some lines may consist solely of whitespace: these will
\emph{not} be skipped, even if \var{skip_blanks} is true.)}
{true}
\lineiii{join_lines}{
if a backslash is the last non-newline character on a line
after stripping comments and whitespace, join the following line
to it to form one logical line; if N consecutive lines end
with a backslash, then N+1 physical lines will be joined to
form one logical line.}
{false}
\lineiii{collapse_join}{
strip leading whitespace from lines that are joined to their
predecessor; only matters if \samp{(join_lines and not lstrip_ws)}}
{false}
\end{tableiii}
Note that since \var{rstrip_ws} can strip the trailing newline, the
semantics of \method{readline()} must differ from those of the builtin file
object's \method{readline()} method! In particular, \method{readline()}
returns \code{None} for end-of-file: an empty string might just be a
blank line (or an all-whitespace line), if \var{rstrip_ws} is true
but \var{skip_blanks} is not.
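A minimal usage sketch (the filename and the particular option settings are
chosen only for illustration):
\begin{verbatim}
from distutils.text_file import TextFile

f = TextFile('input.txt', strip_comments=1, skip_blanks=1,
             join_lines=1)
while 1:
    line = f.readline()
    if line is None:          # end-of-file (not the empty string)
        break
    print line
f.close()
\end{verbatim}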
\begin{methoddesc}{open}{filename}
Open a new file \var{filename}. This overrides any \var{file} or
\var{filename} constructor arguments.
\end{methoddesc}
\begin{methoddesc}{close}{}
Close the current file and forget everything we know about it (including
the filename and the current line number).
\end{methoddesc}
\begin{methoddesc}{warn}{msg\optional{,line=\code{None}}}
Print (to stderr) a warning message tied to the current logical
line in the current file. If the current logical line in the
file spans multiple physical lines, the warning refers to the
whole range, such as \samp{"lines 3-5"}. If \var{line} is supplied,
it overrides the current line number; it may be a list or tuple
to indicate a range of physical lines, or an integer for a
single physical line.
\end{methoddesc}
\begin{methoddesc}{readline}{}
Read and return a single logical line from the current file (or
from an internal buffer if lines have previously been ``unread''
with \method{unreadline()}). If the \var{join_lines} option
is true, this may involve reading multiple physical lines
concatenated into a single string. Updates the current line number,
so calling \method{warn()} after \method{readline()} emits a warning
about the physical line(s) just read. Returns \code{None} on end-of-file,
since the empty string can occur if \var{rstrip_ws} is true but
\var{skip_blanks} is not.
\end{methoddesc}
\begin{methoddesc}{readlines}{}
Read and return the list of all logical lines remaining in the current file.
This updates the current line number to the last line of the file.
\end{methoddesc}
\begin{methoddesc}{unreadline}{line}
Push \var{line} (a string) onto an internal buffer that will be
checked by future \method{readline()} calls. Handy for implementing
a parser with line-at-a-time lookahead. Note that lines that are ``unread''
with \method{unreadline} are not subsequently re-cleansed (whitespace
stripped, or whatever) when read with \method{readline}. If multiple
calls are made to \method{unreadline} before a call to \method{readline},
the lines will be returned in most-recent-first order.
\end{methoddesc}
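For example, a one-line lookahead helper might be sketched as follows (the
helper name is arbitrary):
\begin{verbatim}
def peek_line(tf):
    # Return the next logical line of the TextFile tf without consuming it.
    line = tf.readline()
    if line is not None:      # don't push back the end-of-file marker
        tf.unreadline(line)
    return line
\end{verbatim}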
\end{classdesc}
\section{\module{distutils.version} --- Version number classes}
\declaremodule{standard}{distutils.version}
\modulesynopsis{implements classes that represent module version numbers. }
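The module provides the \class{StrictVersion} and \class{LooseVersion}
classes; a brief illustration:
\begin{verbatim}
from distutils.version import StrictVersion, LooseVersion

print StrictVersion('1.5.1') < StrictVersion('1.5.2b2')   # True
print LooseVersion('1.5.1') < LooseVersion('1.5.2b2')     # True
\end{verbatim}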
% todo
%\section{Distutils Commands}
%
%This part of Distutils implements the various Distutils commands, such
%as \code{build}, \code{install} \&c. Each command is implemented as a
%separate module, with the command name as the name of the module.
\section{\module{distutils.cmd} --- Abstract base class for Distutils commands}
\declaremodule{standard}{distutils.cmd}
\modulesynopsis{This module provides the abstract base class Command. This
class is subclassed by the modules in the \refmodule{distutils.command}
subpackage. }
This module supplies the abstract base class \class{Command}.
\begin{classdesc}{Command}{dist}
Abstract base class for defining command classes, the ``worker bees''
of the Distutils. A useful analogy for command classes is to think of
them as subroutines with local variables called \var{options}. The
options are declared in \method{initialize_options()} and defined
(given their final values) in \method{finalize_options()}, both of
which must be defined by every command class. The distinction between
the two is necessary because option values might come from the outside
world (command line, config file, ...), and any options dependent on
other options must be computed after these outside influences have
been processed --- hence \method{finalize_options()}. The body of the
subroutine, where it does all its work based on the values of its
options, is the \method{run()} method, which must also be implemented
by every command class.
The class constructor takes a single argument \var{dist}, a
\class{Distribution} instance.
\end{classdesc}
\section{\module{distutils.command} --- Individual Distutils commands}
\declaremodule{standard}{distutils.command}
\modulesynopsis{This subpackage contains one module for each standard Distutils command.}
%\subsubsection{Individual Distutils commands}
% todo
\section{\module{distutils.command.bdist} --- Build a binary installer}
\declaremodule{standard}{distutils.command.bdist}
\modulesynopsis{Build a binary installer for a package}
% todo
\section{\module{distutils.command.bdist_packager} --- Abstract base class for packagers}
\declaremodule[distutils.command.bdistpackager]{standard}{distutils.command.bdist_packager}
\modulesynopsis{Abstract base class for packagers}
% todo
\section{\module{distutils.command.bdist_dumb} --- Build a ``dumb'' installer}
\declaremodule[distutils.command.bdistdumb]{standard}{distutils.command.bdist_dumb}
\modulesynopsis{Build a ``dumb'' installer - a simple archive of files}
% todo
\section{\module{distutils.command.bdist_msi} --- Build a Microsoft Installer binary package}
\declaremodule[distutils.command.bdistmsi]{standard}{distutils.command.bdist_msi}
\modulesynopsis{Build a binary distribution as a Windows MSI file}
% todo
\section{\module{distutils.command.bdist_rpm} --- Build a binary distribution as a Red Hat RPM and SRPM}
\declaremodule[distutils.command.bdistrpm]{standard}{distutils.command.bdist_rpm}
\modulesynopsis{Build a binary distribution as a Red Hat RPM and SRPM}
% todo
\section{\module{distutils.command.bdist_wininst} --- Build a Windows installer}
\declaremodule[distutils.command.bdistwininst]{standard}{distutils.command.bdist_wininst}
\modulesynopsis{Build a Windows installer}
% todo
\section{\module{distutils.command.sdist} --- Build a source distribution}
\declaremodule{standard}{distutils.command.sdist}
\modulesynopsis{Build a source distribution}
% todo
\section{\module{distutils.command.build} --- Build all files of a package}
\declaremodule{standard}{distutils.command.build}
\modulesynopsis{Build all files of a package}
% todo
\section{\module{distutils.command.build_clib} --- Build any C libraries in a package}
\declaremodule[distutils.command.buildclib]{standard}{distutils.command.build_clib}
\modulesynopsis{Build any C libraries in a package}
% todo
\section{\module{distutils.command.build_ext} --- Build any extensions in a package}
\declaremodule[distutils.command.buildext]{standard}{distutils.command.build_ext}
\modulesynopsis{Build any extensions in a package}
% todo
\section{\module{distutils.command.build_py} --- Build the .py/.pyc files of a package}
\declaremodule[distutils.command.buildpy]{standard}{distutils.command.build_py}
\modulesynopsis{Build the .py/.pyc files of a package}
% todo
\section{\module{distutils.command.build_scripts} --- Build the scripts of a package}
\declaremodule[distutils.command.buildscripts]{standard}{distutils.command.build_scripts}
\modulesynopsis{Build the scripts of a package}
% todo
\section{\module{distutils.command.clean} --- Clean a package build area}
\declaremodule{standard}{distutils.command.clean}
\modulesynopsis{Clean a package build area}
% todo
\section{\module{distutils.command.config} --- Perform package configuration}
\declaremodule{standard}{distutils.command.config}
\modulesynopsis{Perform package configuration}
% todo
\section{\module{distutils.command.install} --- Install a package}
\declaremodule{standard}{distutils.command.install}
\modulesynopsis{Install a package}
% todo
\section{\module{distutils.command.install_data}
--- Install data files from a package}
\declaremodule[distutils.command.installdata]{standard}{distutils.command.install_data}
\modulesynopsis{Install data files from a package}
% todo
\section{\module{distutils.command.install_headers}
--- Install C/\Cpp{} header files from a package}
\declaremodule[distutils.command.installheaders]{standard}{distutils.command.install_headers}
\modulesynopsis{Install C/\Cpp{} header files from a package}
% todo
\section{\module{distutils.command.install_lib}
--- Install library files from a package}
\declaremodule[distutils.command.installlib]{standard}{distutils.command.install_lib}
\modulesynopsis{Install library files from a package}
% todo
\section{\module{distutils.command.install_scripts}
--- Install script files from a package}
\declaremodule[distutils.command.installscripts]{standard}{distutils.command.install_scripts}
\modulesynopsis{Install script files from a package}
% todo
\section{\module{distutils.command.register}
--- Register a module with the Python Package Index}
\declaremodule{standard}{distutils.command.register}
\modulesynopsis{Register a module with the Python Package Index}
The \code{register} command registers the package with the Python Package
Index. This is described in more detail in \pep{301}.
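Registration is typically run from the command line of a project that has a
\file{setup.py} script:
\begin{verbatim}
python setup.py register
\end{verbatim}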
% todo
\section{Creating a new Distutils command}
This section outlines the steps to create a new Distutils command.
A new command lives in a module in the \module{distutils.command}
package. There is a sample template in that directory called
\file{command_template}. Copy this file to a new module with the
same name as the new command you're implementing. This module should
implement a class with the same name as the module (and the command).
So, for instance, to create the command \code{peel_banana} (so that users
can run \samp{setup.py peel_banana}), you'd copy \file{command_template}
to \file{distutils/command/peel_banana.py}, then edit it so that it's
implementing the class \class{peel_banana}, a subclass of
\class{distutils.cmd.Command}.
Subclasses of \class{Command} must define the following methods.
\begin{methoddesc}{initialize_options}{}
Set default values for all the options that this command
supports. Note that these defaults may be overridden by other
commands, by the setup script, by config files, or by the
command-line. Thus, this is not the place to code dependencies
between options; generally, \method{initialize_options()} implementations
are just a bunch of \samp{self.foo = None} assignments.
\end{methoddesc}
\begin{methoddesc}{finalize_options}{}
Set final values for all the options that this command supports.
This is always called as late as possible, i.e.\ after any option
assignments from the command line or from other commands have been
done. Thus, this is the place to code option dependencies: if
\var{foo} depends on \var{bar}, then it is safe to set \var{foo} from
\var{bar} as long as \var{foo} still has the same value it was assigned in
\method{initialize_options()}.
\end{methoddesc}
\begin{methoddesc}{run}{}
A command's raison d'\^{e}tre: carry out the action it exists to
perform, controlled by the options initialized in
\method{initialize_options()}, customized by other commands, the setup
script, the command-line, and config files, and finalized in
\method{finalize_options()}. All terminal output and filesystem
interaction should be done by \method{run()}.
\end{methoddesc}
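Putting these three methods together, a minimal sketch of the hypothetical
\class{peel_banana} command described earlier might look like this (the
\code{ripeness} option is invented for illustration):
\begin{verbatim}
from distutils.cmd import Command

class peel_banana(Command):

    description = "peel one or more bananas"

    user_options = [('ripeness=', 'r',
                     'required ripeness (hypothetical option)')]

    def initialize_options(self):
        self.ripeness = None

    def finalize_options(self):
        if self.ripeness is None:
            self.ripeness = 'ripe'

    def run(self):
        self.announce("peeling a %s banana" % self.ripeness)
\end{verbatim}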
\var{sub_commands} formalizes the notion of a ``family'' of commands,
e.g.\ \code{install} as the parent with sub-commands \code{install_lib},
\code{install_headers}, etc. The parent of a family of commands
defines \var{sub_commands} as a class attribute; it's a list of
2-tuples \samp{(command_name, predicate)}, with \var{command_name} a string
and \var{predicate} an unbound method, a string, or \code{None}.
\var{predicate} is a method of the parent command that
determines whether the corresponding command is applicable in the
current situation. (E.g.\ \code{install_headers} is only applicable if
there are any C header files to install.) If \var{predicate} is \code{None},
that command is always applicable.
\var{sub_commands} is usually defined at the \emph{end} of a class, because
the predicates can be unbound methods, which must therefore already have been
defined. The canonical example is the \command{install} command.
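The following sketch illustrates the pattern; the command and predicate names
are invented, and the real \command{install} command is considerably more
elaborate:
\begin{verbatim}
from distutils.cmd import Command

class install_like(Command):      # hypothetical parent command

    user_options = []

    def initialize_options(self):
        self.with_headers = 0

    def finalize_options(self):
        pass

    def has_headers(self):
        # predicate: only run install_headers if there are headers
        return self.with_headers

    def run(self):
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)

    # defined at the end of the class, after the predicates it refers to
    sub_commands = [('install_headers', has_headers),
                    ('install_lib', None)]   # None: always applicable
\end{verbatim}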
%
% The ugly "%begin{latexonly}" pseudo-environments are really just to
% keep LaTeX2HTML quiet during the \renewcommand{} macros; they're
% not really valuable.
%
%begin{latexonly}
\renewcommand{\indexname}{Module Index}
%end{latexonly}
\input{moddist.ind} % Module Index
%begin{latexonly}
\renewcommand{\indexname}{Index}
%end{latexonly}
\input{dist.ind} % Index
\end{document}
| {
"alphanum_fraction": 0.7625679559,
"avg_line_length": 42.1294549266,
"ext": "tex",
"hexsha": "04a5a2e79dfd1eb0f7d0906e4a7743c7151ed4f2",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d5dbcd8556f1e45094bd383b50727e248d9de1bf",
"max_forks_repo_licenses": [
"PSF-2.0"
],
"max_forks_repo_name": "deadsnakes/python2.5",
"max_forks_repo_path": "Doc/dist/dist.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d5dbcd8556f1e45094bd383b50727e248d9de1bf",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"PSF-2.0"
],
"max_issues_repo_name": "deadsnakes/python2.5",
"max_issues_repo_path": "Doc/dist/dist.tex",
"max_line_length": 330,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "d5dbcd8556f1e45094bd383b50727e248d9de1bf",
"max_stars_repo_licenses": [
"PSF-2.0"
],
"max_stars_repo_name": "deadsnakes/python2.5",
"max_stars_repo_path": "Doc/dist/dist.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 39912,
"size": 160766
} |
% The real tex generator script
% Creates a complete journal from all your created logs
\documentclass{tufte-book}
\usepackage{lipsum} % Used for placeholder text
\newcommand*{\MinYear}{2018} % Modify these two as time progresses
\newcommand*{\MaxYear}{2022}
\usepackage[USenglish]{babel} % For Regional Text.
\usepackage[useregional]{datetime2}
\usepackage{pgffor}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\title{My Amazing Journal} % Title
\author{Your Amazing Name} % Author Name
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
\maketitle
\selectlanguage{USenglish}
% Loops over every year/month/day and includes the corresponding log if it exists
\foreach \year in {\MinYear,...,\MaxYear}{
\foreach \month in {1,2,...,12}{
\foreach \day in {1,2,...,31}{
\IfFileExists{../\year/\month/\day.md} {
% Displays a section header for the day
\section{\DTMdisplaydate{\year}{\month}{\day}{1}}
% Goes to files in every year/month/ and converts them from markdown to tex
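% NOTE: \write18 shells out to pandoc, so this document must be compiled
% with shell escape enabled (e.g. pdflatex -shell-escape)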
\immediate\write18{pandoc ../\year/\month/\day.md -t latex -o ../\year/\month/\day.tex}
% Concatenates the newly created tex file to the main journal
\input{../\year/\month/\day.tex}
} {
% Do Nothing if file does not exist
}
}
}
}
\end{document}
| {
"alphanum_fraction": 0.4649073554,
"avg_line_length": 39.5777777778,
"ext": "tex",
"hexsha": "a1b25c96699a545190df6070675a7f30002c868a",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "93de08cab5775739e519ca4245fc1a6971065f74",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "gurupunskill/JourTex",
"max_forks_repo_path": "texscript/Journal.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "93de08cab5775739e519ca4245fc1a6971065f74",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "gurupunskill/JourTex",
"max_issues_repo_path": "texscript/Journal.tex",
"max_line_length": 111,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "93de08cab5775739e519ca4245fc1a6971065f74",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "gurupunskill/JourTex",
"max_stars_repo_path": "texscript/Journal.tex",
"max_stars_repo_stars_event_max_datetime": "2018-07-25T09:00:49.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-07-25T09:00:49.000Z",
"num_tokens": 385,
"size": 1781
} |
\section{Broadcast}
\label{sec:prop-pres-case-studies:broadcast}
Broadcast is a system of fifteen processes communicating via three-party broadcast, i.e.\ three processes at a time synchronize simultaneously.
Figure~\ref{fig:prop-pres-case-studies:broadcast-network} shows two pairs of three such processes.
For each group of three processes, there is a synchronization rule that states that actions \action{a1}, \action{a2}, and \action{a3} synchronize.
\begin{figure}[hbt]
\centering
\includegraphics[scale=0.18]{prop-pres-case-studies/figs/broadcast-network}
\caption{Groups of three processes that communicate via broadcast}
\label{fig:prop-pres-case-studies:broadcast-network}
\end{figure}
Due to restrictions imposed by an implementation platform, a transformation that breaks this down into a series of two-party synchronizations might be desired.
Three transformation rules that refine a model in this way are shown in Figure~\ref{fig:prop-pres-case-studies:faulty-broadcast-rules}.
After transformation, new synchronization rules are introduced that define that \action{a1'} and \action{a2'}, and \action{a2''} and \action{a3'} synchronize.
This naive refinement does not preserve properties.
\begin{figure}[hbt]
\centering
\includegraphics[scale=0.2]{prop-pres-case-studies/figs/faulty-broadcast-rules}
\caption{Transformation rules that replace a three-party broadcast by pairwise communication}
\label{fig:prop-pres-case-studies:faulty-broadcast-rules}
\end{figure}
Improved versions of these transformation rules are shown in Figure~\ref{fig:prop-pres-case-studies:broadcast-rules}.
Actions~\action{a2} and \action{a3} are replaced by \action{a2'} and \action{a3'}, respectively, to make the rule system terminating and confluent.
After transformation, new synchronization rules are introduced that define that
\begin{itemize*}
\item \action{m1a1} and \action{m2a1},
\item \action{c1a1} and \action{c2a1},
\item \action{a1a1} and \action{a2a1}, and
\item \action{a2'} and \action{a3'}
\end{itemize*}
synchronize.
The dashed $\tau$-transitions in Figure~\ref{fig:prop-pres-case-studies:broadcast-rules} indicate that this transformation is only property preserving if state~$i$ is matched on states that are diverging.
\begin{figure}[hbt]
\centering
\includegraphics[scale=0.2]{prop-pres-case-studies/figs/broadcast-rules}
\caption{Three improved transformation rules that replace a three-party broadcast by pairwise communication}
\label{fig:prop-pres-case-studies:broadcast-rules}
\end{figure}
To check whether the transformation rules of Figure~\ref{fig:prop-pres-case-studies:broadcast-rules} preserve properties, a number of checks have to be performed.
Figure~\ref{fig:prop-pres-case-studies:broadcast-lhs} shows some of the LTSs that are used for these checks.
The LTSs in Figure~\ref{fig:prop-pres-case-studies:broadcast-lhs} are created from the left-hand sides of the three transformation rules in Figure~\ref{fig:prop-pres-case-studies:broadcast-rules}.
The tools \EXPOPEN and \ltscompare of the \mCRLTwo toolkit cannot handle LTSs with multiple initial states.
To be able to use these tools to perform the checks, one initial state is added to each of the LTSs, as well as $\tau$-transitions to the original initial states.
Figure~\ref{fig:prop-pres-case-studies:broadcast-lhs} also shows the $\kappa$-loops that are added to the original initial states.
Each of the checks determines whether a network consisting of a combination of LTSs created from the left-hand sides of transformation rules is divergence-sensitive branching bisimilar with the network consisting of the corresponding LTSs created from the right-hand sides, after hiding the appropriate actions in both networks.
\begin{figure}[hbt]
\begin{minipage}[b]{4cm}
\centering
\includegraphics[scale=0.2]{prop-pres-case-studies/figs/broadcast-rule1-lhs}
\end{minipage}
\hfill
\begin{minipage}[b]{4cm}
\centering
\includegraphics[scale=0.2]{prop-pres-case-studies/figs/broadcast-rule2-lhs}
\end{minipage}
\hfill
\begin{minipage}[b]{4cm}
\centering
\includegraphics[scale=0.2]{prop-pres-case-studies/figs/broadcast-rule3-lhs}
\end{minipage}
\caption{Process LTSs of the left-hand sides of the transformation rules in Figure~\ref{fig:prop-pres-case-studies:broadcast-rules}}
\label{fig:prop-pres-case-studies:broadcast-lhs}
\end{figure} | {
"alphanum_fraction": 0.7878718535,
"avg_line_length": 62.4285714286,
"ext": "tex",
"hexsha": "55476f8f5fa04f013b2fdb2cb39f482a3d0d1675",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8cabcf160a6f06e12b5ced92bb5cec06983e5bb7",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ljpengelen/latex-phd-thesis",
"max_forks_repo_path": "prop-pres-case-studies/broadcast.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8cabcf160a6f06e12b5ced92bb5cec06983e5bb7",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ljpengelen/latex-phd-thesis",
"max_issues_repo_path": "prop-pres-case-studies/broadcast.tex",
"max_line_length": 328,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "8cabcf160a6f06e12b5ced92bb5cec06983e5bb7",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ljpengelen/latex-phd-thesis",
"max_stars_repo_path": "prop-pres-case-studies/broadcast.tex",
"max_stars_repo_stars_event_max_datetime": "2019-12-18T21:53:57.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-12-18T21:53:57.000Z",
"num_tokens": 1150,
"size": 4370
} |