% % API Documentation for QSTK % Module QSTK.qstkfeat.features % % Generated by epydoc 3.0.1 % [Mon Mar 5 00:49:20 2012] % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% Module Description %% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \index{QSTK \textit{(package)}!QSTK.qstkfeat \textit{(package)}!QSTK.qstkfeat.features \textit{(module)}|(} \section{Module QSTK.qstkfeat.features} \label{QSTK:qstkfeat:features} Created on Nov 7, 2011 \textbf{Author:} John Cornwell \textbf{Contact:} [email protected] %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% Functions %% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection{Functions} \label{QSTK:qstkfeat:features:featMA} \index{QSTK \textit{(package)}!QSTK.qstkfeat \textit{(package)}!QSTK.qstkfeat.features \textit{(module)}!QSTK.qstkfeat.features.featMA \textit{(function)}} \vspace{0.5ex} \hspace{.8\funcindent}\begin{boxedminipage}{\funcwidth} \raggedright \textbf{featMA}(\textit{dData}, \textit{lLookback}={\tt 30}, \textit{bRel}={\tt True}) \vspace{-1.5ex} \rule{\textwidth}{0.5\fboxrule} \setlength{\parskip}{2ex} \setlength{\parskip}{1ex} \textbf{Parameters} \vspace{-1ex} \begin{quote} \begin{Ventry}{xxxxxxxxx} \item[dData] Dictionary of data to use \item[lLookback] Number of days to look in the past \end{Ventry} \end{quote} \textbf{Return Value} \vspace{-1ex} \begin{quote} DataFrame array containing values \end{quote} \end{boxedminipage} \label{QSTK:qstkfeat:features:featRSI} \index{QSTK \textit{(package)}!QSTK.qstkfeat \textit{(package)}!QSTK.qstkfeat.features \textit{(module)}!QSTK.qstkfeat.features.featRSI \textit{(function)}} \vspace{0.5ex} \hspace{.8\funcindent}\begin{boxedminipage}{\funcwidth} \raggedright \textbf{featRSI}(\textit{dData}, \textit{lLookback}={\tt 14}) \vspace{-1.5ex} \rule{\textwidth}{0.5\fboxrule} \setlength{\parskip}{2ex} \setlength{\parskip}{1ex} \textbf{Parameters} \vspace{-1ex} \begin{quote} \begin{Ventry}{xxxxxxxxx} \item[dData] Dictionary of data to use \item[lLookback] Number of days to look in the past, 14 is standard \end{Ventry} \end{quote} \textbf{Return Value} \vspace{-1ex} \begin{quote} DataFrame array containing values \end{quote} \end{boxedminipage} \label{QSTK:qstkfeat:features:featDrawDown} \index{QSTK \textit{(package)}!QSTK.qstkfeat \textit{(package)}!QSTK.qstkfeat.features \textit{(module)}!QSTK.qstkfeat.features.featDrawDown \textit{(function)}} \vspace{0.5ex} \hspace{.8\funcindent}\begin{boxedminipage}{\funcwidth} \raggedright \textbf{featDrawDown}(\textit{dData}, \textit{lLookback}={\tt 30}) \vspace{-1.5ex} \rule{\textwidth}{0.5\fboxrule} \setlength{\parskip}{2ex} \setlength{\parskip}{1ex} \textbf{Parameters} \vspace{-1ex} \begin{quote} \begin{Ventry}{xxxxxxxxx} \item[dData] Dictionary of data to use \item[lLookback] Days to look back \end{Ventry} \end{quote} \textbf{Return Value} \vspace{-1ex} \begin{quote} DataFrame array containing values \end{quote} \textbf{Warning:} Drawdown and RunUp can depend heavily on sample period \end{boxedminipage} \label{QSTK:qstkfeat:features:featRunUp} \index{QSTK \textit{(package)}!QSTK.qstkfeat \textit{(package)}!QSTK.qstkfeat.features \textit{(module)}!QSTK.qstkfeat.features.featRunUp \textit{(function)}} \vspace{0.5ex} \hspace{.8\funcindent}\begin{boxedminipage}{\funcwidth} \raggedright \textbf{featRunUp}(\textit{dData}, \textit{lLookback}={\tt 30}) \vspace{-1.5ex} \rule{\textwidth}{0.5\fboxrule} \setlength{\parskip}{2ex} 
\setlength{\parskip}{1ex} \textbf{Parameters} \vspace{-1ex} \begin{quote} \begin{Ventry}{xxxxxxxxx} \item[dData] Dictionary of data to use \item[lLookback] Number of days to calculate min over \end{Ventry} \end{quote} \textbf{Return Value} \vspace{-1ex} \begin{quote} DataFrame array containing feature values \end{quote} \textbf{Warning:} Drawdown and RunUp can depend heavily on when the sample starts \end{boxedminipage} \label{QSTK:qstkfeat:features:featVolumeDelta} \index{QSTK \textit{(package)}!QSTK.qstkfeat \textit{(package)}!QSTK.qstkfeat.features \textit{(module)}!QSTK.qstkfeat.features.featVolumeDelta \textit{(function)}} \vspace{0.5ex} \hspace{.8\funcindent}\begin{boxedminipage}{\funcwidth} \raggedright \textbf{featVolumeDelta}(\textit{dData}, \textit{lLookback}={\tt 30}) \vspace{-1.5ex} \rule{\textwidth}{0.5\fboxrule} \setlength{\parskip}{2ex} \setlength{\parskip}{1ex} \textbf{Parameters} \vspace{-1ex} \begin{quote} \begin{Ventry}{xxxxxxxxx} \item[dData] Dictionary of data to use \item[lLookback] Number of days to use for MA \end{Ventry} \end{quote} \textbf{Return Value} \vspace{-1ex} \begin{quote} DataFrame array containing values \end{quote} \end{boxedminipage} \label{QSTK:qstkfeat:features:featAroon} \index{QSTK \textit{(package)}!QSTK.qstkfeat \textit{(package)}!QSTK.qstkfeat.features \textit{(module)}!QSTK.qstkfeat.features.featAroon \textit{(function)}} \vspace{0.5ex} \hspace{.8\funcindent}\begin{boxedminipage}{\funcwidth} \raggedright \textbf{featAroon}(\textit{dData}, \textit{bDown}={\tt False}, \textit{lLookback}={\tt 25}) \vspace{-1.5ex} \rule{\textwidth}{0.5\fboxrule} \setlength{\parskip}{2ex} \setlength{\parskip}{1ex} \textbf{Parameters} \vspace{-1ex} \begin{quote} \begin{Ventry}{xxxxxxxxx} \item[dData] Dictionary of data to use \item[bDown] If false, calculates aroonUp (high), else aroonDown (lows) \item[lLookback] Days to lookback to calculate high/low from \end{Ventry} \end{quote} \textbf{Return Value} \vspace{-1ex} \begin{quote} DataFrame array containing feature values \end{quote} \end{boxedminipage} \label{QSTK:qstkfeat:features:featStochastic} \index{QSTK \textit{(package)}!QSTK.qstkfeat \textit{(package)}!QSTK.qstkfeat.features \textit{(module)}!QSTK.qstkfeat.features.featStochastic \textit{(function)}} \vspace{0.5ex} \hspace{.8\funcindent}\begin{boxedminipage}{\funcwidth} \raggedright \textbf{featStochastic}(\textit{dData}, \textit{lLookback}={\tt 14}, \textit{bFast}={\tt True}, \textit{lMA}={\tt 3}) \vspace{-1.5ex} \rule{\textwidth}{0.5\fboxrule} \setlength{\parskip}{2ex} \setlength{\parskip}{1ex} \textbf{Parameters} \vspace{-1ex} \begin{quote} \begin{Ventry}{xxxxx} \item[dData] Dictionary of data to use \item[bFast] If false, do slow stochastics, 3 day MA, if not use fast, no MA \end{Ventry} \end{quote} \textbf{Return Value} \vspace{-1ex} \begin{quote} DataFrame array containing feature values \end{quote} \end{boxedminipage} \label{QSTK:qstkfeat:features:featBeta} \index{QSTK \textit{(package)}!QSTK.qstkfeat \textit{(package)}!QSTK.qstkfeat.features \textit{(module)}!QSTK.qstkfeat.features.featBeta \textit{(function)}} \vspace{0.5ex} \hspace{.8\funcindent}\begin{boxedminipage}{\funcwidth} \raggedright \textbf{featBeta}(\textit{dData}, \textit{lLookback}={\tt 14}, \textit{sMarket}={\tt \texttt{'}\texttt{\$SPX}\texttt{'}}) \vspace{-1.5ex} \rule{\textwidth}{0.5\fboxrule} \setlength{\parskip}{2ex} \setlength{\parskip}{1ex} \textbf{Parameters} \vspace{-1ex} \begin{quote} \begin{Ventry}{xxxxxx} \item[dData] Dictionary of data to use \item[sStock] Stock to calculate 
beta relative to \end{Ventry} \end{quote} \textbf{Return Value} \vspace{-1ex} \begin{quote} DataFrame array containing feature values \end{quote} \end{boxedminipage} \label{QSTK:qstkfeat:features:featBollinger} \index{QSTK \textit{(package)}!QSTK.qstkfeat \textit{(package)}!QSTK.qstkfeat.features \textit{(module)}!QSTK.qstkfeat.features.featBollinger \textit{(function)}} \vspace{0.5ex} \hspace{.8\funcindent}\begin{boxedminipage}{\funcwidth} \raggedright \textbf{featBollinger}(\textit{dData}, \textit{lLookback}={\tt 20}) \vspace{-1.5ex} \rule{\textwidth}{0.5\fboxrule} \setlength{\parskip}{2ex} \setlength{\parskip}{1ex} \textbf{Parameters} \vspace{-1ex} \begin{quote} \begin{Ventry}{xxxxxxxxx} \item[dData] Dictionary of data to use \item[lLookback] Number of days to calculate moving average over \end{Ventry} \end{quote} \textbf{Return Value} \vspace{-1ex} \begin{quote} DataFrame array containing feature values \end{quote} \end{boxedminipage} \label{QSTK:qstkfeat:features:featCorrelation} \index{QSTK \textit{(package)}!QSTK.qstkfeat \textit{(package)}!QSTK.qstkfeat.features \textit{(module)}!QSTK.qstkfeat.features.featCorrelation \textit{(function)}} \vspace{0.5ex} \hspace{.8\funcindent}\begin{boxedminipage}{\funcwidth} \raggedright \textbf{featCorrelation}(\textit{dData}, \textit{lLookback}={\tt 20}, \textit{sRel}={\tt \texttt{'}\texttt{\$SPX}\texttt{'}}) \vspace{-1.5ex} \rule{\textwidth}{0.5\fboxrule} \setlength{\parskip}{2ex} \setlength{\parskip}{1ex} \textbf{Parameters} \vspace{-1ex} \begin{quote} \begin{Ventry}{xxxxxxxxx} \item[dData] Dictionary of data to use \item[lLookback] Number of days to calculate moving average over \end{Ventry} \end{quote} \textbf{Return Value} \vspace{-1ex} \begin{quote} DataFrame array containing feature values \end{quote} \end{boxedminipage} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% Variables %% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection{Variables} \vspace{-1cm} \hspace{\varindent}\begin{longtable}{|p{\varnamewidth}|p{\vardescrwidth}|l} \cline{1-2} \cline{1-2} \centering \textbf{Name} & \centering \textbf{Description}& \\ \cline{1-2} \endhead\cline{1-2}\multicolumn{3}{r}{\small\textit{continued on next page}}\\\endfoot\cline{1-2} \endlastfoot\raggedright \_\-\_\-p\-a\-c\-k\-a\-g\-e\-\_\-\_\- & \raggedright \textbf{Value:} {\tt \texttt{'}\texttt{QSTK.qstkfeat}\texttt{'}}&\\ \cline{1-2} \end{longtable} \index{QSTK \textit{(package)}!QSTK.qstkfeat \textit{(package)}!QSTK.qstkfeat.features \textit{(module)}|)}
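For convenience, a minimal usage sketch of this module follows; it is not part of the generated documentation. The keys of the data dictionary (\texttt{'close'}, \texttt{'volume'}) and the CSV file names are assumptions for illustration only; only the function names and default arguments documented above are taken from the module itself.
{\small\begin{verbatim}
# Minimal usage sketch for QSTK.qstkfeat.features.
# Assumption: dData maps keys such as 'close' and
# 'volume' to pandas DataFrames indexed by date,
# with one column per symbol.
import pandas as pd
from QSTK.qstkfeat.features import featMA, featRSI

dfClose = pd.read_csv('close.csv', index_col=0,
                      parse_dates=True)
dfVolume = pd.read_csv('volume.csv', index_col=0,
                       parse_dates=True)
dData = {'close': dfClose, 'volume': dfVolume}

# Signatures as documented above.
dfMA = featMA(dData, lLookback=30, bRel=True)
dfRSI = featRSI(dData, lLookback=14)
print(dfMA.tail())
print(dfRSI.tail())
\end{verbatim}}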
{ "alphanum_fraction": 0.6035176759, "avg_line_length": 23.5628865979, "ext": "tex", "hexsha": "ef3890694c9aad31eb20dc9d151b36e459f1c9e6", "lang": "TeX", "max_forks_count": 154, "max_forks_repo_forks_event_max_datetime": "2022-03-19T02:27:59.000Z", "max_forks_repo_forks_event_min_datetime": "2015-01-30T09:41:15.000Z", "max_forks_repo_head_hexsha": "0eb2c7a776c259a087fdcac1d3ff883eb0b5516c", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "jenniyanjie/QuantSoftwareToolkit", "max_forks_repo_path": "Legacy/Docs/pdf/QSTK.qstkfeat.features-module.tex", "max_issues_count": 19, "max_issues_repo_head_hexsha": "0eb2c7a776c259a087fdcac1d3ff883eb0b5516c", "max_issues_repo_issues_event_max_datetime": "2021-07-19T11:13:47.000Z", "max_issues_repo_issues_event_min_datetime": "2015-01-04T13:12:33.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "jenniyanjie/QuantSoftwareToolkit", "max_issues_repo_path": "Legacy/Docs/pdf/QSTK.qstkfeat.features-module.tex", "max_line_length": 168, "max_stars_count": 339, "max_stars_repo_head_hexsha": "4981506c37227a72404229d5e1e0887f797a5d57", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "elxavicio/QSTK", "max_stars_repo_path": "Docs/pdf/QSTK.qstkfeat.features-module.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-23T23:32:24.000Z", "max_stars_repo_stars_event_min_datetime": "2015-01-01T10:06:49.000Z", "num_tokens": 3652, "size": 11428 }
\documentclass[main.tex]{subfiles} \begin{document} Let us continue with our discussion of the power spectrum: the \textbf{spectral index} \(n_s\) is defined as\footnote{Note that the dimensionless power spectrum is sometimes denoted as \(\Delta^2\) and sometimes as \(\Delta \): here (in this section) we use the latter definition.} % \begin{align} n_s - 1 = \dv{\log \Delta (k)}{\log k} \,. \end{align} The index \(s\) means ``scalar''. In general this will depend on the wavenumber \(k\); it is a convenient description of the shape of the power spectrum. If \(n_s\) were a constant, then we would have a \emph{powerlaw} spectrum: \(\Delta (k) = \Delta (k_0 ) (k / k_0 )^{n_s - 1}\). If \(n_s = 1\), we have the \textbf{Harrison-Zel'dovich} power spectrum, for which \(\Delta \) does not depend on \(k\). This would be a \emph{scale-invariant} power spectrum. In a quantum-mechanical formalism, we will calculate the power spectrum as % \begin{align} \bra{0} \delta \varphi _{\vec{k}_1} \delta \varphi _{\vec{k}_2}^{*} \ket{0} \,, \end{align} % which will be written in terms of creation and annihilation operators: we have % \begin{align} \bra{0} a a \ket{0}= \bra{0} a ^\dag a \ket{0}= \bra{0} a ^\dag a ^\dag \ket{0}= 0 \,, \end{align} % while % \begin{align} \bra{0} a a ^\dag \ket{0} &= \underbrace{\bra{0} \qty[a, a ^\dag] \ket{0}}_{ \delta^{(3)} (\vec{k}_1 - \vec{k}_2)} - \underbrace{\bra{0} a ^\dag a \ket{0}}_{= 0} \,, \end{align} % so % \begin{align} \expval{ \delta \varphi _{\vec{k}_1} \delta \varphi _{\vec{k}_2}^{*}} = (2 \pi )^3 \abs{ \delta \varphi _{\vec{k}_1}}^2 \delta^3(\vec{k}_1 - \vec{k}_2) \,, \end{align} % where \(\delta \varphi _{k_1} = u_{k_1} / a\). Therefore, the power spectrum is given by % \begin{align} P(k) = \abs{ \delta \varphi _k}^2 \,. \end{align} Recall that in the superhorizon case we found % \begin{align} \abs{ \delta \varphi _k} = \frac{H}{\sqrt{2 k^3}} \qty( \frac{k}{aH})^{3/2 - \nu } \,, \end{align} % where \(\nu^2 = 9/4 + 9 \epsilon - 3 \eta _V\), so \(\nu \approx 3/2 + 3\epsilon - \eta _V \), meaning that the index is \(3/2 - \nu = \eta _V - 3 \epsilon \). Then, the power spectrum reads % \begin{align} \label{eq:scalar-field-power-spectrum} \Delta_{ \delta \varphi }(k) = \frac{k^3}{2 \pi^2} \abs{ \delta \varphi _k}^2 = \qty( \frac{H}{2 \pi })^2 \qty( \frac{k}{aH})^{3-2\nu } \,. \end{align} % There will be a weak scale dependence proportional to the slow-roll parameters: \(3 - 2 \nu = 2 \eta _V - 6 \epsilon \). \section{From \(\delta \varphi \) to primordial density perturbations} The first Friedmann equation will read % \begin{align} H^2 = \frac{8 \pi G}{3} \rho_\varphi \approx \frac{8 \pi G}{3} V(\varphi ) \,, \end{align} % so the density fluctuation can be written as % \begin{align} \delta \rho _\varphi \approx V' (\varphi ) \delta \varphi \approx - 3 H \dot{\varphi} \delta \varphi \marginnote{See equation \eqref{eq:approx-EOM-scalar-field-flat-FLRW}} \,. \end{align} Recall that we can define a time shift \(\delta t = - \delta \varphi / \dot{\varphi} \). This means that we will have perturbations in the expansion of the universe from place to place. The number of \(e\)-folds is given by % \begin{align} N = \log \qty( \frac{a(t)}{a (t_*)}) = \int_{t_*}^{t} H(\widetilde{t}) \dd{\widetilde{t}} \,. \end{align} The fluctuations will perturb the number of \(e\)-folds by % \begin{align} \label{eq:delta-N-definition} \zeta = \delta N = H \delta t = - H \frac{ \delta \varphi }{\dot{\varphi}} \approx - H \frac{ \delta \rho _\varphi }{ \dot{\rho}_\varphi } \,. 
\end{align} This is called the ``\(\delta N\) formalism'' for the study of large-scale perturbations. The last equality in \eqref{eq:delta-N-definition} comes from the fact that % \begin{align} \dot{\rho}_\varphi = - 3H \qty(\rho _\varphi + P_\varphi ) = - 3 H \dot{\varphi}^2 \marginnote{From equation \eqref{eq:energy-momentum-scalar-field}.} \,, \end{align} % so indeed % \begin{align} H \frac{ \delta \rho _\varphi }{\dot{\rho}_\varphi } = \frac{- 3 H^2 \dot{\varphi} \delta \varphi }{-3 H \dot{\varphi}^2} = H \frac{ \delta \varphi }{\dot{\varphi}} \,. \end{align} The quantity \(\delta N = \zeta\) is \textbf{gauge invariant}. It is written as % \begin{align} \zeta = - \hat{\Phi} - H \frac{ \delta \rho }{\dot{\rho} } \,, \end{align} % where \(\hat{\Phi}\) is related to scalar perturbations of the spatial part of the metric, \(g_{ij}\). We shall explore this later. This \(\zeta\) is called the \textbf{curvature perturbation on uniform energy density hypersurfaces}. Why did we write this with \(\rho \) instead of \(\rho _\varphi \)? This definition is completely general; it can be applied at any time and for a generic evolution of the universe. We can then specify it to % \begin{align} \zeta_\varphi \approx - H \frac{ \delta \rho _\varphi }{\dot{\rho}_\varphi } \,. \end{align} What we will show is that on superhorizon scales \(\zeta\) remains constant (for single-field inflation, at least). Therefore, this keeps a sort of ``record'' of what happened after horizon crossing. Let us denote as \(t^{(1)}_H(k)\) the time of horizon crossing during inflation for perturbations with wavenumber \(k\), and \(t^{(2)}_H(k)\) the time \emph{after inflation} of the second horizon crossing, when the perturbation comes back inside the horizon. The value of \(\zeta \) at these two times will be the same (again, as long as we are considering a single-field model of inflation). % \todo[inline]{This can be seen since\dots} We know that \(\delta \varphi \sim H / 2 \pi \) (statistically), and \(H^2 \approx 8 \pi G V(\varphi ) /3\): then, specifying the potential gives a prediction for the power spectrum. Suppose that the perturbation re-enters during the radiation-dominated epoch. Then, % \begin{align} H \frac{ \delta \rho }{\dot{\rho} } \approx \frac{H \delta \rho _\gamma }{-4 H \rho _\gamma } = - \frac{1}{4}\frac{ \delta \rho _\gamma }{\rho _\gamma } \,, \end{align} % since % \begin{align} \dot{\rho} = \dv{\rho }{a} \dot{a} = \frac{\rho}{a} \dv{\log \rho }{\log a} \dot{a} = \frac{\rho}{a} (-4) \dot{a} = - 4 \rho H \,. \end{align} On the other hand, at the first horizon crossing \(\zeta = \delta N = - (H / \dot{\varphi}) \delta \varphi \), therefore the dimensionless power spectrum at the second horizon crossing can be referred back to the first as % \begin{align} \Delta_{ \delta \rho / \rho } (k)= \eval{\frac{H^2}{ \dot{\varphi}^2} \Delta_{ \delta \varphi } (k)}_{t^{(1)}_H (k)} \,, \end{align} % and % \begin{align} \Delta_{ \delta \varphi } (k) = \qty( \frac{H}{ 2 \pi })^2 \qty(\frac{k}{aH})^{3-2 \nu } \,. \end{align} \end{document}
{ "alphanum_fraction": 0.6517562294, "avg_line_length": 36.0108108108, "ext": "tex", "hexsha": "dc88651e52ef3d0b6f1452db65ef99692505aa11", "lang": "TeX", "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2021-08-06T16:11:07.000Z", "max_forks_repo_forks_event_min_datetime": "2019-10-03T16:20:19.000Z", "max_forks_repo_head_hexsha": "805ebe1be49bbd14c6b46b24055f9fc7d1cd2586", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "jacopok/notes", "max_forks_repo_path": "ap_third_semester/early_universe/nov02.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "805ebe1be49bbd14c6b46b24055f9fc7d1cd2586", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "jacopok/notes", "max_issues_repo_path": "ap_third_semester/early_universe/nov02.tex", "max_line_length": 281, "max_stars_count": 6, "max_stars_repo_head_hexsha": "805ebe1be49bbd14c6b46b24055f9fc7d1cd2586", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "jacopok/notes", "max_stars_repo_path": "ap_third_semester/early_universe/nov02.tex", "max_stars_repo_stars_event_max_datetime": "2022-01-13T14:52:50.000Z", "max_stars_repo_stars_event_min_datetime": "2019-10-10T13:10:57.000Z", "num_tokens": 2365, "size": 6662 }
\documentclass{article} \usepackage{mathrsfs,amsmath} \usepackage{xcolor} \usepackage{titlesec} \usepackage{listings} \usepackage{syntax} \usepackage{pythonhighlighting} \usepackage{graphicx} \graphicspath{ {./assets/} } \usepackage[margin=1.4in]{geometry} \title{Handout \#7 | CS 471} \author{Jared Dyreson\\ California State University, Fullerton} \DeclareRobustCommand{\bowtie}{% \mathrel\triangleright\joinrel\mathrel\triangleleft} \usepackage [english]{babel} \usepackage [autostyle, english = american]{csquotes} \MakeOuterQuote{"} \titlespacing*{\section} {0pt}{5.5ex plus 1ex minus .2ex}{4.3ex plus .2ex} \titlespacing*{\subsection} {0pt}{5.5ex plus 1ex minus .2ex}{4.3ex plus .2ex} \usepackage{hyperref} \hypersetup{ colorlinks, citecolor=black, filecolor=black, linkcolor=black, urlcolor=black } \begin{document} \maketitle \tableofcontents \newpage \section{Questions} \begin{enumerate} \item A packet switch receives a packet and determines the outbound link to which the packet should be forwarded. When the packet arrives, one other packet is halfway done being transmitted on this outbound link and four other packets are waiting to be transmitted. Packets are transmitted in order of arrival. Suppose all packets are 1,500 bytes and the link rate is 2 MBPS. What is the queuing delay for the packet? More generally,what is the queuing delay when all packets have length L, the transmission rate is R, x bits of the currently- being-transmitted packet have been transmitted, and n packets are already in the queue? \begin{itemize} \item Generally the queueing delay can be expressed as the following: \\ $$\text{delay}_{\text{queue}} = \frac{n \times L + (L - x)}{R}$$ \item Therefore, if you plug in the correct values: \\ $$\text{delay}_{\text{queue}} = \frac{4 \times 1500 + (1500 - 750)}{2 \times 10^{6}} = \frac{27}{8000} \text{ seconds } $$ \end{itemize} \item What does the application layer define? \begin{itemize} \item \textbf{Application layer:} specifies the shared communications protocols and interface methods used by hosts in a communications network \end{itemize} \begin{figure}[!h] \centering \includegraphics[width=7cm]{application_layer} \end{figure} \item What four basic transport layer services may an application need? \begin{itemize} \item Data integrity \item Timing \item Throughput \item Security \end{itemize} \item I would like to fetch 10 images. How many HTTP requests must my browser send to the server? \begin{itemize} \item 11: 1 for the HTML page and 10 for the other resources \end{itemize} \newpage \item For a communication session between a pair of processes, which process is the client and server? \begin{itemize} \item Client process: initiates the communication \item Server process: process that waits to be contacted \end{itemize} \item Suppose you wanted to transfer a file from a server to a client as fast as possible. Would you use TCP or UDP? \begin{itemize} \item UDP because you are not concerned about the data integrity. You can transmit the data quickly. \end{itemize} \item Why do HTTP, FTP, SMTP, and POP3 run over TCP rather than UDP? \begin{itemize} \item These protocols use TCP over UDP because of data integrity. All information requested will be collected. \end{itemize} \end{enumerate} \end{document}
{ "alphanum_fraction": 0.7567084079, "avg_line_length": 33.54, "ext": "tex", "hexsha": "db2a0f95e6ca07a0987463bb85f6f3360386a64d", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "0db85ca7323cb0c398581690150cb9d86c06bc79", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "JaredDyreson/CS-471-Handouts", "max_forks_repo_path": "LaTeX/7/Handout7.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "0db85ca7323cb0c398581690150cb9d86c06bc79", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "JaredDyreson/CS-471-Handouts", "max_issues_repo_path": "LaTeX/7/Handout7.tex", "max_line_length": 651, "max_stars_count": null, "max_stars_repo_head_hexsha": "0db85ca7323cb0c398581690150cb9d86c06bc79", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "JaredDyreson/CS-471-Handouts", "max_stars_repo_path": "LaTeX/7/Handout7.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 935, "size": 3354 }
\documentclass[11pt]{article}
\usepackage{tac2014}
\usepackage{times}
\usepackage{latexsym}
\usepackage{amsmath}
\usepackage{multirow}
\usepackage{multicol}
\usepackage{url}
\DeclareMathOperator*{\argmax}{arg\,max}
\setlength\titlebox{10.5cm} % Expanding the titlebox
\title{The Computational Linguistics Summarization Pilot Task}
\author{Kokil Jaidka$^{1}\thanks{\hspace{.2cm}Authors appear in alphabetical order, with the exception of the coordinator of the task, who is given first authorship.} $ , Muthu Kumar Chandrasekaran$^{2}$, Rahul Jha$^{3}$, Christopher Jones$^{4}$ \\ {\bf Min-Yen Kan}$^{2,5}${\bf , Ankur Khanna}$^{2}${\bf , Diego Molla-Aliod}$^{4}${\bf , Dragomir R. Radev}$^{3}$, \\ {\bf Francesco Ronzano}$^{6}$ and {\bf Horacio Saggion}$^{6}$ \\ \\ $^1$ Wee Kim Wee School of Communication \& Information, Nanyang Technological University, Singapore \\ $^2$ Web, IR / NLP Group, School of Computing, National University of Singapore, Singapore \\ $^3$ School of Information, University of Michigan, USA\\ $^4$ Division of Information \& Communication Sciences, Computing Department, Macquarie University, Australia \\ $^5$ Interactive and Digital Media Institute, National University of Singapore, Singapore \\ $^6$ Universitat Pompeu Fabra, Barcelona, Spain }
\date{}
\begin{document}
\maketitle
\begin{abstract} The Computational Linguistics (CL) Summarization Pilot Task was a pilot shared task that used citations to create summaries of scholarly research publications in the domain of computational linguistics. We describe the background for the task, the corpus construction, and the evaluation methods for the pilot, and survey the participating systems and their preliminary results. The experience gleaned from the pilot will assist in the proper organization of future shared tasks, where difficulties with annotations and scale can be addressed. \end{abstract}
\section{Introduction}
This paper describes the evolution and design of the Computational Linguistics (CL) pilot task\footnote{This research is supported in part by the Singapore National Research Foundation under its International Research Centre @ Singapore Funding Initiative and administered by the IDM Programme Office, and in part made possible thanks to a summer internship granted to Christopher Jones by the Department of Computing, Macquarie University.} for the summarization of computational linguistics research papers sampled from the Association for Computational Linguistics (ACL) Anthology. The task was run concurrently with the Text Analysis Conference 2014 (TAC '14), although not formally affiliated with it, and shares the same basic structure and guidelines as the formal TAC 2014 Biomedical Summarization (BiomedSumm) track. A training corpus of ``topics'' from CL research papers was released, each comprising a reference paper along with some sampled papers that cite it. Participants were invited to enter their systems in a task-based evaluation, similar to BiomedSumm. This paper describes the participating systems and surveys their results from the task-based evaluation.
\section{Background}
Recent work \cite{mohammad2009,abu2011} in scientific document summarization has used citation sentences (also known as {\it citances}) from citing papers to create a multi-document summary of the reference paper (RP).
As proposed by \cite{vu2010,hoang2010} the summarization can be decomposed into finding the relevant documents; in this case, the citing papers (CPs), then selecting sentences from those papers that cite and justify the citation and finally generating the summary. To tackle each subproblem, we created a gold standard dataset where human annotators identified the citances in each of (up to) ten randomly sampled citing papers for the RP. Jaidka and Khoo \shortcite{jaidka2013}'s work on summarizing information science articles indicated that most citations clearly refer to one or more specific discourse facets of the cited paper. Discourse facets indicate the type of information described in the reference span. E.g., ``Aim'' indicates that the citation is about the Aim of the reference paper. In the CL domain, during our corpus construction, we identified that the discourse facets being cited were usually the aim of the paper, methods followed, and the results or implications of the work. Accordingly, we used a different set of discourse facets than BiomedSumm which suit our target domain of CL papers better. The resultant corpus should be viewed as a development corpus only, such that the community can enlarge it to a proper shared task with training, development and testing set divisions in the near future. \section{Corpus Construction} \label{corpus} A large and important portion of scholarly communication in the domain of computational linguistics is publicly accessible and archived at the ACL Anthology\footnote{\url{http://aclweb.org/anthology/}}. The texts from this archive are also under a Creative Commons license, which allows unfettered access to the published works for any purposes, including downstream research on summarization of its contents. We thus view the ACL Anthology as a corpus and randomly sampled published research papers as a base for building our annotated corpus. In selecting materials for resultant corpus from the Anthology, we wanted to enable citation-based summarization. To this end, with consultation from the BiomedSumm organizers, we needed to ensure that the reference paper was cited with appropriate diversity. As of the corpus construction date (18 September 2014), the live Anthology contained approximately 25K publications, exclusive of the third-party papers hosted (i.e., with metadata but without the actual . PDF of the paper) and extraneous files (i.e., front matter and full volumes). To ensure sufficient opportunity to use citation based summarization, we further removed papers published after and including 2006, leaving 13.8K publications. We randomized this list to remove any ordering effects. Starting from the top of the list, we used a combination of Google Web and Scholar searches to approximate the number of citations (i.e., citing papers (CP)). We retained any paper with over 10 citations. We vetted the citations to ensure that the citation spread was at least a window of three years, as previous work had indicated that citations over different time periods (with respect to the publication date of the RP) exhibit different tendencies \cite{N13-1067}. We then used the title search facility of the ACL Anthology Network\footnote{\url{http://clair.eecs.umich.edu/aan/index.php}} (AAN, February 2013 version), to locate the paper. We inspected and listed all citing papers' Anthology ID, title and year of publication. We note the citation count from Google / Google Scholar and AAN differ substantially. 
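A minimal sketch of the reference paper filter just described (publication year cutoff, more than 10 citations, and a citation spread of at least three years) is shown below; the record structure and field names are hypothetical, not part of the actual corpus-construction scripts.
{\footnotesize\begin{verbatim}
# Illustrative sketch of the RP selection filter
# described above; field names are hypothetical.
def keep_reference_paper(paper):
    if paper['year'] >= 2006:        # pre-2006 only
        return False
    if paper['n_citations'] <= 10:   # over 10 cites
        return False
    years = paper['citing_years']
    # citation spread of at least three years
    return max(years) - min(years) >= 3

candidates = [
    {'id': 'RP-1', 'year': 1998, 'n_citations': 25,
     'citing_years': [1999, 2003, 2010]},
    {'id': 'RP-2', 'year': 2007, 'n_citations': 40,
     'citing_years': [2008, 2009]},
]
kept = [p for p in candidates
        if keep_reference_paper(p)]
print([p['id'] for p in kept])  # ['RP-1']
\end{verbatim}}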
To report the final list of citing papers, we strived to provide at least three CPs for each RP. We defined the following criteria (in order of priority):
\begin{enumerate} \item Non-list citation (i.e., at least one citation in the body of the CP for the RP not of the form [RP,a,b,c]); \vspace{-.3cm} \item The oldest and newest citations within AAN; and, \vspace{-.3cm} \item Citations from different years. \end{enumerate}
We included the oldest and newest citation regardless of criteria 1) and 3), and included a randomized sample of up to 8 additional citing paper IDs that met either criterion 1) or 3). The resulting final list was divided among the annotator group, who are a subset of the authors of this paper from NUS and NTU. We used the same annotation scheme as the BiomedSumm track's corpus. Given each RP and up to 10 associated CPs, the annotation group was instructed to find citations to the RP in each CP. Annotators followed the instructions used for BiomedSumm task annotation, to re-use the resources created for BiomedSumm and reduce the necessary effort. Specifically, the citation text, citation marker, reference text, and discourse facet were marked for each citation of the RP found in the CP.
\section{The CL-Summ Task}
This shared task proposes to solve the same problems posed in the BiomedSumm track, but in the domain of computational linguistics. The task calls for summarization frameworks to build a structured summary of a research paper -- one which incorporates facet information (such as Aims, Methods, Results and Implications) from the text of the paper, and ``community summaries'' from its citing papers. \\ \noindent We define the {\it CL-Summ Task} as follows:
{\bf Given}: a topic, comprising the PDF and extracted text of an RP and up to 10 CPs. In each provided CP, the citations to the RP (or citances) have been identified. The information referenced in the RP is also annotated. Note that both the text and the citations may be noisy, and that there could be additional citing papers that were not provided (due to sampling).
{\bf Output}: systems should perform the following tasks, where the task numbering corresponds to that used in the BiomedSumm task.
\begin{itemize} \item Task 1A: Identify the text spans in the RP which correspond to the citances from the CP. These may be of the granularity of a full sentence or several sentences (up to 5 sentences), and may be contiguous or not. They may also be sentence fragments. \item Task 1B: Identify the discourse facet for every cited text span from a predefined set of facets. The discourse facet indicates the type of information described in the reference span. A maximum of 3 reference spans can be marked for every citance. In case these spans describe different discourse facets, the most prevalent discourse facet is annotated. \end{itemize}
{\bf Evaluation}: Assess Task 1A performance by using the ROUGE~\cite{Lin:2004} score to compare the overlap of text spans in the system output versus the gold standard created by human annotators. An additional task in BiomedSumm, which was tentative and not advertised with this shared task, was:
\begin{itemize} \item Task 2: Generate a faceted summary of up to 250 words of the reference paper, using the paper itself and its citing papers. \end{itemize}
\section{Participating Teams}
Nine teams expressed an interest in participating in the shared task; they are listed below in alphabetical order.
\begin{enumerate} \item{{\bf CCS2014}, from the IDA Center for Computing Sciences, USA.
They proposed to employ a language model based on the sections of the document to find referring text and related sentences in the cited document.} \vspace{-.3cm} \item{{\bf clair\_umich$^{*\$}$}, from the University of Michigan, Ann Arbor, USA.} \vspace{-.3cm} \item{{\bf IHMC}, a team from IHMC, USA.} \vspace{-.3cm} \item{{\bf IITKGP\_sum}, from the Indian Institute of Technology, Kharagpur, India. They planned to use citation network structure and citation context analysis to summarize the scientific articles.} \vspace{-.3cm} \item{{\bf MQ$^{*\$}$}, from Macquarie University, Australia. They planned to use the same system that was used for the BiomedSumm track, with the exception that they would not incorporate domain knowledge (UMLS). For Task~1A they proposed to use similarity metrics to extract the top $n$ sentences from the documents. For Task~1B they planned to use a logistic regression classifier. Finally, for the bonus Task~2 they would incorporate the distances from Task~1A to rank the sentences.} \vspace{-.3cm} \item{{\bf PolyAF}, from The Hong Kong Polytechnic University.} \vspace{-.3cm} \item{{\bf TabiBoun14}, from Boğaziçi University, Turkey. They planned to modify an existing system for CL papers, which uses LIBSVM as a classification tool for facet classification, and to use cosine similarity to compare text spans.} \vspace{-.3cm} \item{{\bf Taln.UPF$^{*}$}, from Universitat Pompeu Fabra, Spain. They proposed to adapt available summarization tools to scientific texts.} \vspace{-.3cm} \item{{\bf TXSUMM}, from the University of Houston, Texas. Their system consists of applying similarity kernels in an attempt to better discriminate between candidate text spans (with sentence granularity). Their system uses an extractive, ranking method.} \end{enumerate}
Three teams submitted system descriptions, and a further two (of the three) submitted their findings. The system descriptions and self-reported task results are reported in the next sections (denoted with `*' and `\$', respectively, in the list above).
\section{The clair\_umich System --- Comparing Overlap of Word Synsets} \label{s:umich}
\subsection{Data Preprocessing} For each RP, citing sentences were extracted from all its CPs. Each citing sentence was then matched to a text segment in the original paper, creating the final annotated dataset. The original source text for the papers in the CL-Summ corpus was not sentence-segmented, which made it difficult to compute evaluation metrics. Data preprocessing of the CL-Summ corpus was therefore done in the following way: first, sentences from the reference papers were segmented, and then each of these source sentences was matched against the CL-Summ annotation files. This yielded a fixed set of source sentences from the original files, a subset of which were matched to each citing sentence. In this way, given a citing sentence, the matching sentences from the source paper could be compared to the gold standard sentences from the source paper and precision / recall computed. The average number of source sentences matched for each citing sentence was 1.28 (with standard deviation 1.92). The maximum number of source sentences matched for a citing sentence was 7. Given that the total number of source sentences for papers ranged between 100 and 600, this made it a very challenging classification problem.
\subsection{Baseline System} The team first created a baseline system based on TF.IDF cosine similarity; a minimal sketch of this kind of matching is shown below.
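The sketch is illustrative only (not the team's actual code): it ranks the sentences of an RP by TF.IDF cosine similarity to a citance, assuming scikit-learn is available and that the RP has already been split into a list of sentence strings; all names in it are hypothetical, and the IDF here is fit on the RP sentences alone as a simplification.
{\footnotesize\begin{verbatim}
# Illustrative sketch: rank RP sentences by TF.IDF
# cosine similarity to a citance (simplified).
from sklearn.feature_extraction.text import (
    TfidfVectorizer)
from sklearn.metrics.pairwise import (
    cosine_similarity)

def rank_rp_sentences(citance, rp_sentences):
    vec = TfidfVectorizer(lowercase=True)
    M = vec.fit_transform(rp_sentences)
    q = vec.transform([citance])
    sims = cosine_similarity(q, M).ravel()
    order = sims.argsort()[::-1]  # best match first
    return [(int(i), float(sims[i])) for i in order]

# Hypothetical toy example.
rp = ["We use a hidden Markov model for tagging.",
      "Results are reported on a news corpus."]
cit = "They apply a hidden Markov model to tagging."
print(rank_rp_sentences(cit, rp)[:3])
\end{verbatim}}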
In the actual system, for any citing sentence, the TF.IDF cosine similarity was computed with all the sentences in the RP; the IDF values thus differed across each of the 10 RPs.
\subsection{Supervised System} The supervised system used knowledge-based features derived from WordNet, syntactic dependency-based features, and distributional features in addition to simple lexical features like cosine similarity. These features are described below. \begin{enumerate} \item{\bf Lexical Features:} Two lexical features were used -- TF.IDF and the LCS (Longest Common Subsequence) between the citing sentence ($C$) and source sentence ($S$), which is computed as: \vspace{-.3cm} \begin{eqnarray*} \frac{|LCS|}{\min(|C|,|S|)} \end{eqnarray*} \item{\bf Knowledge-Based Features:} The system also used a set of features based on WordNet similarity. Six WordNet-based word similarity measures were combined to obtain six knowledge-based sentence similarity features using the method proposed in \cite{Banea2012}. The WordNet-based word similarity measures used are path similarity, WUP similarity~\cite{Wu:1994:VSL:981732.981751}, LCH similarity~\cite{leacock1998combining}, Resnik similarity~\cite{Resnik:1995:UIC:1625855.1625914}, Jiang-Conrath similarity~\cite{Jiang97taxonomySimilarity}, and Lin similarity~\cite{Lin:1998:IDS:645527.657297}. Given each of these similarity measures, the similarity between two sentences was computed by first creating a set of senses for each of the words in each of the sentences. Given these two sets of senses, the similarity score between citing sentence $C$ and source sentence $S$ was calculated as follows: \vspace{-.3cm} \begin{eqnarray*} sim_{wn}(C,S) = \frac{(\omega + \sum_{i=1}^{|\phi|}\phi_i) * (2|C||S|)}{|C|+|S|} \end{eqnarray*} Here $\omega$ is the number of shared senses between $C$ and $S$. The list $\phi$ contains the similarities of the non-shared words in the shorter text; $\phi_i$ is the highest similarity score of the $i$th such word among all the words of the longer text \cite{S13-1017}. \item{\bf Syntactic Features:} An additional feature based on the similarity of dependency structures was used, applying the method described in \cite{S13-1017}. The Stanford parser was used to obtain dependency parses of all the citing sentences and source sentences. Given a candidate sentence pair, two syntactic dependencies were considered equal if they have the same dependency type, governing lemma, and dependent lemma. If $R_c$ and $R_s$ are the sets of all dependency relations in $C$ and $S$, the dependency overlap score was computed using the formula: \vspace{-.3cm} \begin{eqnarray*} sim_{dep}(C,S) = \frac{2*|R_c \cap R_s| * |R_c||R_s|}{|R_c|+|R_s|} \end{eqnarray*} \end{enumerate}
\section{The MQ System --- Finding the Best Fit to a Citance} \label{s:mq}
Given the text of a citance, the MQ system ranks the sentences of the reference paper according to their similarity to the citance. Each sentence and its citance were modeled as vectors and compared using cosine similarity. The team experimented with different forms of representing the information in the vectors, and different forms of using the similarity scores to perform the final sentence ranking. \begin{figure*} $$ \hbox{MMR} = \arg\max_{D_i\in R\setminus S}\left[\lambda(\hbox{sim}(D_i,Q)) - (1-\lambda) \max_{D_j\in S} \hbox{sim}(D_i,D_j)\right] $$ \begin{quote} Where: \begin{itemize} \item $Q$ is the citance text. \item $R$ is the set of sentences in the document. \item $S$ is the set of sentences that have been chosen in the summary so far.
\end{itemize} \end{quote} \caption{Maximal Marginal Relevance (MMR)} \label{fig:mmr} \end{figure*}
\subsection{Baseline -- Using TF.IDF} \label{sec:tfidf} For the baseline system (similar to the clair\_umich team), the TF.IDF of all lowercased words was used, without removing stop words. Separate TF.IDF statistics were computed for each reference paper, using the set of sentences in the paper and the citance text of all citing papers.
\subsection{Adding texts of the same topic} \label{sec:topics} Since the amount of text used to compute the TF.IDF in Section~\ref{sec:tfidf} was relatively little, the complete text of all citing papers was added, under the presumption that citing papers are of the same topic as the reference paper. By adding this text, the team hoped to include complementary information that could be useful for computing the IDF component.
\subsection{Adding context} \label{sec:context} In order to extend the information of each sentence in the reference paper, and further adding to the approach in Section~\ref{sec:topics}, the text of each reference paper sentence was extended with a context window of 20 sentences by including the neighbouring sentences, centered on the target sentence.
\subsection{Re-ranking using MMR} \label{sec:mmr} The last experiment used Maximal Marginal Relevance (MMR) \cite{Carbonell:1998} to rank the sentences. All sentences were represented as TF.IDF vectors of the extended information described in Section~\ref{sec:context}. The final score of a sentence was then a combination of its similarity with the citance and its similarity with the other sentences of the summary, according to the formula shown in Figure~\ref{fig:mmr}. A value of $\lambda=0.97$ was chosen.
\section{The Taln.UPF System}
\subsection{Pre-processing and document preparation} The UPF system carried out the following set of preprocessing steps on the papers of each topic: \begin{itemize} \item{Sentence segmentation:} To identify candidate sentences that will be validated or rejected in the following pre-processing steps; \item{Tokenizer and POS tagger:} Using the open-source GATE software; \item{Sentence sanitizer:} To remove incorrectly annotated sentences, relying on a set of rules and heuristics; \item{Document structural analyzer:} To classify each sentence as belonging to one of the following document structural categories: Abstract, Introduction, Result\_Discussion, Experimental\_Procedure, Supplemental\_Data, Material\_Method, Conclusion, Acknowledgement\_Funding, and Reference; \item{Sentence TF.IDF vector computation:} To associate to each sentence a TF.IDF vector where the IDF values are computed over all the papers of the related topic (up to 10 CPs and one RP). \end{itemize}
\subsection{Task 1A: Algorithm for identifying reference paper text spans for each citance} \begin{itemize} \item{For each citance, its global citance context span was considered as the union of the citance context spans} marked by human annotators (in this case, there was only one available human annotation, so no union was required). \item{From the citing paper, those sentences were selected} which overlapped totally or partially with the global citance context span; these sentences were referred to as the citance context sentences (CtxSent1,..., CtxSentN). \item{Each citance was characterized by the document structural category} associated with most of its citance context sentences (CtxSent1,..., CtxSentN).
In case of a tie in the number of occurrences of document structural categories among all the citance context sentences, the most frequently chosen document structural category for the citing paper was preferred. In case of persisting ties, the document structural category that is most frequent in the whole set of citing and reference papers was preferred. \item{Each reference paper sentence (RefSent) was assigned a score} equal to the sum of its TF.IDF vector cosine similarity with each citance context sentence (CtxSent1,..., CtxSentN). \item{The RefSent scores were weighted by the relative relevance} of this kind of link between document structural categories in the whole training corpus. For instance, if there is a citance associated with the Introduction that references a RefSent belonging to the Abstract, and in the whole training corpus this situation occurs in 6.5\% of citance-referenced sentence pairs, the RefSent score is multiplied by 0.065, obtaining the final RefSent score. \item{The first 3 reference paper sentences} (RefSents) with the highest final RefSent score were chosen as the reference paper text spans. \end{itemize}
\subsection{Task 1B: Algorithm for identifying the discourse facet of the cited text spans} A linear-kernel SVM classifier was trained to associate each citance with one of the five text facets considered in Task 1B. Each citance was characterized by lexical and semantic features extracted from the sentences belonging to the citance context, together with the sentences of the reference paper selected as the outcome of Task 1A. Some of the features exploited were: \begin{itemize} \item{Relative number of sentences belonging to each document structural category;} \vspace{-.3cm} \item{Relative number of sentences belonging to the citance context or reference paper;} \vspace{-.3cm} \item{Relative number of POS tags;} \vspace{-.3cm} \item{Presence of key lexical patterns.} \end{itemize}
\section{Evaluation and Results}
Two teams have submitted their results so far, self-assessed using ROUGE \cite{Lin:2004}. ROUGE (specifically, the ROUGE-L variant) is a popular evaluation method for summarization systems that compares the text output of a system against a set of target summaries. Since ROUGE uses the actual content words, and not the offset information of the sentences chosen by the annotation team, we expect non-zero results in cases where a system chooses a sentence that is somewhat similar (but not identical) to one chosen by the annotators. The MQ system was unsupervised while the clair\_umich system was supervised; clair\_umich reports cross-validated performance over the 10 topics, while MQ evaluated their system over all 10 topics in a single run. The ROUGE-L scores were calculated by using the set of sentences selected by each system as the system summary and comparing its overlap against the target summaries, namely the sentences given by the annotators. The following paragraphs describe the results for Tasks~1A and~1B, and the bonus Task~2, which was attempted by the MQ system.
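As a point of reference for how these scores are computed, the following is a simplified, single-reference ROUGE-L calculation over whitespace tokens; the official ROUGE toolkit adds preprocessing options (stemming, multiple references, jackknifing) that are omitted in this sketch.
{\footnotesize\begin{verbatim}
# Simplified single-reference ROUGE-L (LCS-based
# precision / recall / F1); illustrative only.
def lcs_len(a, b):
    # classic dynamic-programming LCS length
    dp = [[0]*(len(b)+1) for _ in range(len(a)+1)]
    for i, x in enumerate(a, 1):
        for j, y in enumerate(b, 1):
            dp[i][j] = (dp[i-1][j-1] + 1 if x == y
                        else max(dp[i-1][j], dp[i][j-1]))
    return dp[-1][-1]

def rouge_l(system, reference, beta=1.0):
    sys_t, ref_t = system.split(), reference.split()
    lcs = lcs_len(sys_t, ref_t)
    p = lcs / len(sys_t) if sys_t else 0.0
    r = lcs / len(ref_t) if ref_t else 0.0
    if p == 0.0 or r == 0.0:
        return {'P': p, 'R': r, 'F1': 0.0}
    f = (1 + beta**2) * p * r / (r + beta**2 * p)
    return {'P': p, 'R': r, 'F1': f}

print(rouge_l("the cat sat",
              "the cat sat on the mat"))
\end{verbatim}}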
\subsection{Task 1A: For each citance, identify the spans of text (cited text spans) in the RP} Table~\ref{tab:task1aoverall} reports the overall performance of the two systems on Task~1A. \begin{table*} \centering \begin{tabular}{|r|r|r|r|r|r|} \hline \multicolumn{3}{|c|}{MQ} & \multicolumn{3}{|c|}{clair\_umich}\\ \hline P & R & $F_1$ & P & R & $F_1$\\ \hline 0.212 & 0.335 & 0.223 & 0.444 & 0.574 & 0.487\\ \hline \end{tabular} \caption{Task~1A performance for the participating systems expressed as ROUGE-L scores averaged over all topics.} \label{tab:task1aoverall} \end{table*} Table~\ref{tab:task1av2} shows the ROUGE-L $F_1$ scores for each individual reference document from the CL-Summ dataset. \begin{table*} \centering \begin{tabular}{|l|r|r||l|r|r|} \hline Paper ID & MQ & clair\_umich & Paper ID & MQ & clair\_umich \\ \hline C90-2039 & 0.235 & 0.635 & J00-3003 & 0.196 & 0.559\\ C94-2154 & 0.288 & 0.536 & J98-2005 & 0.101 & 0.344\\ E03-1020 & 0.239 & 0.478 & N01-1011 & 0.221 & 0.498\\ H05-1115 & 0.350 & 0.375 & P98-1081 & 0.200 & 0.367\\ H89-2014 & 0.332 & 0.546 & X96-1048 & 0.248 & 0.535\\ \hline \end{tabular} \caption{Task~1A ROUGE-L $F_1$ scores for individual topics.} \label{tab:task1av2} \end{table*}
\subsection{Task 2: Generate a structured summary of the RP and all of the community discussion of the paper represented in the citances} The MQ team performed an additional test to see whether information from the citances was useful for building an extractive summary, as is the case with the BiomedSumm data \cite{Molla:ALTA2014}. They implemented extractive summarization systems with and without information from the citances. The summarizers without information from the citances scored each sentence as the sum of the TF.IDF values of the sentence elements, using the TF.IDF approach described in Section~\ref{sec:tfidf}. The summarizers with information from the citances scored each candidate sentence $i$ on the basis of rank($i$,$c$) obtained in Task 1A, which has values between 0 (first sentence) and $n$ (last sentence), and represents the rank of sentence $i$ for citance $c$: $$ \hbox{score}(i) = \sum_{c\in\hbox{citances}}\left(1-\frac{\hbox{rank}(i,c)}{n}\right) $$ The summaries were evaluated using ROUGE-L, where the model summaries are the abstracts of the corresponding papers. Since paper X96-1048 of the SciSumm data did not have an abstract, it was omitted from this experiment. An example excerpt from a target summary (Abstract) for the reference paper J00-3003 is: \noindent\fbox{\parbox{.47\textwidth}{\it \footnotesize We describe a statistical approach for modeling dialogue acts in conversational speech, i.e., speech-act-like units such as STATEMENT, QUESTION, BACKCHANNEL, AGREEMENT, DISAGREEMENT, and APOLOGY. Our model detects and predicts dialogue acts based on lexical, collocational, and prosodic cues, as well as on the discourse coherence of the dialogue act sequence. The dialogue model is based on treating the discourse structure of a conversation as a hidden Markov model and the individual dialogue acts as observations emanating from the model states. Constraints on the likely sequence of dialogue acts are modeled via a dialogue act n-gram...
We achieved good dialogue act labeling accuracy (65\% based on errorful, automatically recognized words and prosody, and 71\% based on word transcripts, compared to a chance baseline accuracy of 35\% and human accuracy of 84\%) and a small reduction in word recognition error.}} The MQ System's output baseline summary for the same reference paper is 20 sentences long; below is an excerpt: \noindent\fbox{\parbox{.47\textwidth}{\it \footnotesize Dialogue Act Modeling for Automatic Tagging and Recognition of Conversational Speech. In all these cases, DA labels would enrich the available input for higher-level processing of the spoken words. The relation between utterances and speaker turns is not one-to-one: a single turn can contain multiple utterances, and utterances can span more than one turn (e.g., in the case of backchanneling by the other speaker in midutterance). The most common of these are the AGREEMENT/ACCEPTS. One frequent example in our corpus was the distinction between BACKCHANNELS and AGREEMENTS (see Table 2), which share terms such as ``right'' and ``yeah''. Networks compare to decision trees for the type of data studied here. Neural networks are worth investigating since they offer potential advantages over decision trees.} } Table~\ref{tab:task2v2} shows the breakdown of ROUGE-L $F_1$ scores per document. \begin{table*} \centering \begin{tabular}{|l|r|r|r||l|r|r|r|} \hline Paper ID & TF.IDF & Task 1A & Task 1A & Paper ID & TF.IDF & Task 1A & Task 1A \\ & & TF.IDF & MMR & & & TF.IDF & MMR \\ \hline C90-2039\_TRAIN & 0.347 & 0.315 & 0.293 & J00-3003\_TRAIN & 0.221 & 0.382 & 0.367\\ C94-2154\_TRAIN & 0.095 & 0.123 & 0.120 & J98-2005\_TRAIN & 0.221 & 0.216 & 0.233\\ E03-1020\_TRAIN & 0.189 & 0.189 & 0.196 & N01-1011\_TRAIN & 0.187 & 0.268 & 0.284\\ H05-1115\_TRAIN & 0.134 & 0.306 & 0.321 & P98-1081\_TRAIN & 0.241 & 0.210 & 0.206\\ \cline{5-8} H89-2014\_TRAIN & 0.294 & 0.319 & 0.320 & Average & 0.214 & 0.259 & 0.260 \\ \hline \end{tabular} \caption{ROUGE-L $F_1$ results for summaries generated by the MQ system.} \label{tab:task2v2} \end{table*} \section{Discussion} \subsection{Comparing the MQ System with the BioMedSumm task} Table~\ref{tab:task1a} compares the results of the MQ system's experiments with the SciSumm data, against the results from the BiomedSumm data. In all results the systems were designed to return 3 sentences, as specified in the shared task. All short sentences (under 50 characters) were ignored, to avoid including headings or mistakes made by the sentence segmentation algorithm. \begin{table*} \centering \begin{tabular}{|l|r|r|r|c|r|r|r|c|} \hline & \multicolumn{4}{|c|}{CL-Summ} & \multicolumn{4}{|c|}{BiomedSumm}\\ \hline Run & P & R & $F_1$ & CI & P & R & $F_1$ & CI\\ \hline TF.IDF & 0.198 & 0.316 & 0.211 & 0.185--0.240 & 0.326 & 0.273 & 0.279 & 0.265--0.293\\ topics & 0.201 & 0.324 & 0.217 & 0.191--0.245 & 0.357 & 0.288 & 0.300 & 0.285--0.316\\ context & 0.214 & 0.339 & 0.225 & 0.197--0.255 & 0.372 & 0.291 & 0.308 & 0.293--0.323\\ MMR & 0.212 & 0.335 & 0.223 & 0.195--0.251 & 0.375 & 0.290 & 0.308 & 0.293--0.323\\ \hline \end{tabular} \caption{ROUGE-L results of the MQ system runs for Task 1A.} \label{tab:task1a} \end{table*} The results show an improvement in both domains, with the exception that MMR does not improve over the run that uses TF.IDF over context in CL-Summ, whereas there is an improvement in BiomedSumm. 
The absolute values are better in the BiomedSumm data, and looking at the confidence intervals it can be presumed that the difference between the best and the worst run is statistically significant in the BiomedSumm data. The results in the CL-Summ data are poorer in general and there are no statistically significant differences. However, this may be an artifact of the small size of the corpus. Overall, the improvement of results in CL-Summ mirrors that of the BiomedSumm data, so it can be suggested that on adding more information to the models that compute TF.IDF, the results improve. It is expected that alternative approaches, which gather related information to be added for computing the vector models will produce even better results. The results with MMR appears to be contradictory across the two domains, but the difference is small and may not be statistically significant. \subsection{Tweaking the Parameters --- the clair\_umich Baseline} For any citing sentence, the TF.IDF cosine similarity was computed with all the sentences in the source paper, and any sentences that had a cosine similarity higher than a given threshold were added to the matched sentences. Table~\ref{tab:clairumichbaseline} shows the precision / recall for different values of the cosine threshold. \begin{table}[h] \centering \begin{tabular}{|l|r|r|r|r|} \hline Similarity & Precision & Recall & $F_1$ \\ Threshold & & & \\ \hline 0.01 & 0.027 & 0.641 & 0.051\\ 0.05 & 0.048 & 0.426 & 0.087\\ 0.1 & 0.060 & 0.235 & 0.095\\ 0.2 & 0.079 & 0.081 & 0.080\\ 0.3 & 0.062 & 0.032 & 0.042\\ 0.4 & 0.022 & 0.085 & 0.012\\ 0.5 & 0.007 & 0.002 & 0.003\\ \hline \end{tabular} \caption{Precision/Recall for different values of the cosine threshold for the baseline clair\_umich system.} \label{tab:clairumichbaseline} \end{table} The $F_1$ scores seems to reach a maximum at the similarity threshold of 0.1. The recall at the threshold of 0.1 is 0.23, while the precision is only 0.06. This suggests that initial progress can be made on this problem by first removing these spurious matches that have high lexical similarity. \subsection{Error Analysis for the Participating Systems} Some drawbacks were observed in the approach and evaluation for the MQ system. The example below illustrates the MQ system's output for task1a, for the reference paper H89-2014: \noindent\fbox{\parbox{.47\textwidth}{\it \footnotesize ``The statistical methods can be described in terms of Markov models.'' ``An alternative approach taken by Jelinek, (Jelinek, 1985) is to view the training problem in terms of a "hidden" Markov model: that is, only the words of the training text are available, their corresponding categories are not known.'' ``In this regard, word equivalence classes were used (Kupiec, 1989).'' The target sentence was: ``The work described here also makes use of a hidden Markov model.'' }} The first sentence of the sample output was very similar to the target sentence. It was not the best match, but it was a close match, and an evaluation metric such as ROUGE would reward it. On the other hand, the second sentence, even though it talked about HMMs, it was not strictly about the approach used by the paper and therefore it should not be rewarded with a good score. However, ROUGE would be too lenient here. This is one of the issues identified by the MQ system in following a purely lexical approach. 
In the clair\_umich system, a number of errors made by the baseline are due to source sentences that match the citing sentence lexically but differ slightly in their information content.

\begin{figure}
\noindent\fbox{\parbox{.47\textwidth}{Citing text: ``use the BNC to build a co-occurrence graph for nouns, based on a co-occurrence frequency threshold'' \\
\emph{True positives:}
\begin{itemize}
\vspace{-.2cm}
\item{\small ``Following the method in (Widdows and Dorow, 2002), we build a graph in which each node represents a noun and two nodes have an edge between them if they co-occur in lists more than a given number of times.''}
\end{itemize}
\emph{False positives:}
\begin{itemize}
\vspace{-.2cm}
\item{\small ``Based on the intuition that nouns which co-occur in a list are often semantically related, we extract contexts of the form Noun, Noun,... and/or Noun, e.g. ``genomic DNA from rat, mouse and dog''.''}
\vspace{-.2cm}
\item{\small ``To detect the different areas of meaning in our local graphs, we use a cluster algorithm for graphs (Markov clustering, MCL) developed by van Dongen (2000).''}
\vspace{-.2cm}
\item{\small ``The algorithm is based on a graph model representing words and relationships between them.''}
\end{itemize}
}}
\caption{Lexically similar false positive sentences.}
\label{f:2}
\end{figure}

An example is shown in Figure~\ref{f:2}. Here, even though the false positive sentences contain the same lexical items (nouns, co-occurrence, graph), they differ slightly in the facts presented. Detecting such subtle differences in meaning might be challenging for an automated system. Another difficult case arises when the citing sentence states something that is only implied by the sentence in the RP, as shown in Figure~\ref{f:3}.

\begin{figure}
\noindent\fbox{\parbox{.47\textwidth}{Citing text: ``The line of our argument below follows a proof provided in ... for the maximum likelihood estimator based on finite tree distributions''
\emph{False negatives:}
\begin{itemize}
\item{\small ``We will show that in both cases the estimated probability is tight.''}
\end{itemize}
}}
\caption{Implied example.}
\label{f:3}
\end{figure}

Here, the citing text mentions a proof from the RP, but to match the sentence in the RP, the system needs to understand that the act of showing something in a scientific paper constitutes a proof.

\section{Shortcomings and Limitations}

Several errors and shortcomings of the dataset were identified in the process of annotating and parsing the corpus for use by the participating systems.
\begin{itemize}
\item{The use of ``...'' where text spans are snippets:} This follows the BioMedSumm standard practice of indicating discontiguous texts. In the Citation Text and Reference Text fields, the ``...'' means that there is a gap between two text spans (citation spans or reference spans). The spans may be on different pages, so the gap might be a page number or a footnote; there might also be a formula, a figure, or some text encoding that is not part of the annotation. However, this notation caused mismatches for annotations that used text from different parts of the same sentence.
\item{Small size of the training corpus:} The corpus comprised only 10 topics, each with up to 10 citing documents. On this small dataset, participants were asked to conduct a 10-fold cross-validation. The small size of the dataset meant that there were no statistically significant results; significance could only be surmised from the overall trend of the data.
\item{Errors in parsing the files:} Some of the older PDF files, when parsed to text or XML, had errors such as misspelled words, spaces within words, sentences in the wrong place, and so on. Unfortunately, these were OCR parsing errors and not within our control. We recommended that participants configure their string matching to be lenient enough to alleviate such problems.
\item{Errors in citation/reference offset numbers:} In the original annotations, citation/reference offsets were character-based and relative to an XML encoding that was not shared in the task, so they did not match the offsets in the text-only, cleaned version of the document. Although the text versions of the source documents were shared with the intention of helping the participants, this often made their task more difficult if their system was geared towards numerical offset matching rather than string matching. A solution was found for reference offsets by revising them to sentence ID numbers based on the XML files available from the clair\_umich system's pre-processing stage; however, the citation offsets remain character-based.
\item{Text encoding:} Often, the text was not in the expected UTF-8 format. Some participating teams, such as TALN.UPF, solved this by running the universal charset tool provided by Google Code over all the text and annotations in order to determine the right file encoding to use. It was found that some of the files were in WINDOWS-1252 and GB18030 encodings.
\item{Errors in file construction:} An automatic, open-source tool was used to map the citation annotations from the annotation software, Protege, to a text file. However, participants identified several errors in the output -- especially in cases where there was a one-to-many mapping between citations and references. Besides this, several annotation texts had no annotation ID (Citance Number field).
\end{itemize}

\section{Conclusions}

This paper describes the computational linguistics pilot task for the faceted summarization of scholarly papers. We describe the three systems that participated in the shared task and the evaluation of two submitted runs. The teams used versions of TF.IDF as baselines. The MQ system followed an unsupervised algorithm, while clair\_umich followed a supervised one. For identifying referenced text spans in reference papers, the best performance was obtained by clair\_umich's supervised algorithm, which uses lexical, syntactic, and knowledge-based features to calculate the overlap between sentences in the citation span and the reference paper. Although no system submitted results for Task 1B, the task of identifying the discourse facets of the reference text, TALN.UPF described an algorithm which they aim to implement. Finally, an additional experiment by the MQ team compared baseline summaries of reference papers, based on a TF.IDF calculation, against gold-standard summaries consisting of the reference papers' abstracts.

The clair\_umich system incorporated WordNet synsets for expanding and comparing cited text with reference papers, and the use of syntactic features further enriched the calculation of overlap. The MQ system, on the other hand, relied exclusively on lexically comparing the texts. Furthermore, their system was originally built for the BioMedSumm task -- however, they had to discard some domain-specific features for this task. It is possible that the lack of domain knowledge, coupled with OCR-related and PDF parsing errors, affected the performance of their system in the CL domain.
This task is an initiative to encourage the development of tools and approaches for scientific summarization. It helped us identify existing tools and resources to leverage for this purpose, as well as the hindrances that needed to be overcome in order to have a systematic and well-coordinated evaluation. However, with results from only two systems, it is not possible to conjecture which methods may be better for summarizing CL research papers. The resources from this task, and its corpus, are freely available for interested research groups to experiment with; the corpus is a first-of-its-kind summarization corpus for computational linguistics.

The results of the pilot are encouraging: there seems to be ample interest from the community, and it should be possible to answer more detailed methodological questions with a deeper analysis and a larger dataset. We encourage the community to support a future proposal to enlarge the pilot into a full-scale shared task. We plan a systematic annotation of training, development, and test sets, the availability of more than one gold-standard annotation, and open-sourced tools and resources to support the efforts of participating teams. We invite the community to join us in this endeavour with any resources and time they can spare.

\bibliographystyle{tac2014}

\section{Acknowledgements}

This Shared Task is supported in part by the Singapore National Research Foundation under its International Research Centre @ Singapore Funding Initiative and administered by the IDM Programme Office. The authors also acknowledge and thank the BiomedSumm organizers -- especially Lucy Vanderwende, Prabha Yadav, and Hoa Trang Dang -- for lending their expertise in organizing this pilot.\\
The {\bf MQ system} was made possible thanks to a winter internship granted to Christopher Jones by the Department of Computing, Macquarie University. \\
The {\bf clair\_umich} team wishes to acknowledge the helpful suggestions of Ben King, Mohamed Abouelenien, and Reed Coke. \\
The {\bf TALN.UPF} system is supported by the project Dr. Inventor (FP7-ICT-2013.8.1 611383), programa Ram\'on y Cajal 2009 (RYC-2009-04291), and the project TIN2012-38584-C06-03 Ministerio de Econom\'{\i}a y Competitividad, Secretar\'{\i}a de Estado de Investigaci\'on, Desarrollo e Innovaci\'on, Spain.

\bibliography{tac2014}
\end{document}
{ "alphanum_fraction": 0.7725662109, "avg_line_length": 48.4334433443, "ext": "tex", "hexsha": "af928a5799dde0fef5a1e03cfcb6ac0029de2981", "lang": "TeX", "max_forks_count": 89, "max_forks_repo_forks_event_max_datetime": "2022-03-19T18:47:56.000Z", "max_forks_repo_forks_event_min_datetime": "2015-04-01T14:19:19.000Z", "max_forks_repo_head_hexsha": "3aa7f89afbe051d7202575b46e8f7449f7a088b0", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "yolochai/scisumm-corpus", "max_forks_repo_path": "publications/TAC2014_workshop_paper/tac2014-comments.tex", "max_issues_count": 24, "max_issues_repo_head_hexsha": "3aa7f89afbe051d7202575b46e8f7449f7a088b0", "max_issues_repo_issues_event_max_datetime": "2021-07-28T08:14:57.000Z", "max_issues_repo_issues_event_min_datetime": "2016-03-05T17:28:14.000Z", "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "yolochai/scisumm-corpus", "max_issues_repo_path": "publications/TAC2014_workshop_paper/tac2014-comments.tex", "max_line_length": 437, "max_stars_count": 198, "max_stars_repo_head_hexsha": "3aa7f89afbe051d7202575b46e8f7449f7a088b0", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "yolochai/scisumm-corpus", "max_stars_repo_path": "publications/TAC2014_workshop_paper/tac2014-comments.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-21T19:12:20.000Z", "max_stars_repo_stars_event_min_datetime": "2015-05-03T06:35:05.000Z", "num_tokens": 11200, "size": 44026 }
\documentclass[shownotes,11pt, aspectratio=169]{beamer} \usepackage{pgfpages} % These slides also contain speaker notes. You can print just the slides, % just the notes, or both, depending on the setting below. Comment out the want % you want. \setbeameroption{hide notes} % Only slide %\setbeameroption{show only notes} % Only notes %\setbeameroption{show notes on second screen=right} % Both \usepackage{helvet} \usepackage[default]{Fira Sans} \usepackage{array} \usepackage{caption} %\usepackage[clean]{svg} \usepackage{tikz} \usepackage{verbatim} \setbeamertemplate{note page}{\pagecolor{yellow!5}\insertnote} \usetikzlibrary{positioning} \usetikzlibrary{snakes} \usetikzlibrary{calc} \usetikzlibrary{arrows} \usetikzlibrary{decorations.markings} \usetikzlibrary{shapes.misc} \usetikzlibrary{matrix,shapes,arrows,fit,tikzmark} \usepackage{amsmath} \usepackage{mathpazo} \usepackage{hyperref} \usepackage{lipsum} \usepackage{multimedia} \usepackage{graphicx} \usepackage{multirow} %\usepackage{graphicx} \usepackage{dcolumn} \usepackage{bbm} \usepackage{tfrupee} %%%%more packages%%% \usepackage{tabulary} %\usepackage[usenames,dvipsnames]{pstricks} %\usepackage[capposition=top]{floatrow} \usepackage{lineno,hyperref} \usepackage{epsfig} \usepackage{graphics} \usepackage{psfrag} \usepackage{etoolbox} \appto\TPTnoteSettings{\footnotesize} \usepackage{color} \usepackage{amsfonts} \usepackage{amsmath} \usepackage{mathrsfs} \usepackage{eucal} \usepackage{amsbsy} \usepackage{url} \usepackage{color} \usepackage{lineno} \usepackage{amssymb} %\usepackage{adjustbox} \newcommand{\overbar}[1]{\mkern 2.5mu\overline{\mkern-2.5mu#1\mkern-2.5mu}\mkern 2.5mu} \usepackage{booktabs} %%To use toprule, midrule, bottomrule, etc. \usepackage{rotating} %% To use sidewaystable \usepackage{dcolumn} \usepackage{longtable} \usepackage{threeparttable} \usepackage{tabularx} %%%% \newcolumntype{d}[0]{D{.}{.}{5}} \usepackage{changepage} \usepackage{appendixnumberbeamer} \newcommand{\beginbackup}{ \newcounter{framenumbervorappendix} \setcounter{framenumbervorappendix}{\value{framenumber}} \setbeamertemplate{footline} { \leavevmode% \hline box{% \begin{beamercolorbox}[wd=\paperwidth,ht=2.25ex,dp=1ex,right]{footlinecolor}% % \insertframenumber \hspace*{2ex} \end{beamercolorbox}}% \vskip0pt% } } \newcommand{\backupend}{ \addtocounter{framenumbervorappendix}{-\value{framenumber}} \addtocounter{framenumber}{\value{framenumbervorappendix}} } \usepackage{graphicx} \usepackage[space]{grffile} % These are my colors -- there are many like them, but these ones are mine. \definecolor{blue}{RGB}{0,114,178} \definecolor{red}{RGB}{213,94,0} \definecolor{yellow}{RGB}{240,228,66} \definecolor{green}{RGB}{0,158,115} \definecolor{applegreen}{rgb}{0.55, 0.71, 0.0} \definecolor{ao(english)}{rgb}{0.0, 0.5, 0.0} \hypersetup{ colorlinks=false, bookmarks=true, linkbordercolor = {white}, linkcolor = {blue} } %% I use a beige off white for my background \definecolor{MyBackground}{RGB}{255,253,218} %% Uncomment this if you want to change the background color to something else \setbeamercolor{background canvas}{bg=MyBackground} %% Change the bg color to adjust your transition slide background color! 
\newenvironment{transitionframe}{ \setbeamercolor{background canvas}{bg=yellow} \begin{frame}}{ \end{frame} } \setbeamercolor{frametitle}{fg=blue} \setbeamercolor{title}{fg=black} \setbeamertemplate{footline}[frame number] \setbeamertemplate{navigation symbols}{} \setbeamertemplate{itemize items}{-} \setbeamercolor{itemize item}{fg=blue} \setbeamercolor{itemize subitem}{fg=blue} \setbeamercolor{enumerate item}{fg=blue} \setbeamercolor{enumerate subitem}{fg=blue} \setbeamercolor{button}{bg=MyBackground,fg=blue,} % If you like road maps, rather than having clutter at the top, have a roadmap show up at the end of each section % (and after your introduction) % Uncomment this is if you want the roadmap! % \AtBeginSection[] % { % \begin{frame} % \frametitle{Roadmap of Talk} % \tableofcontents[currentsection] % \end{frame} % } \setbeamercolor{section in toc}{fg=blue} \setbeamercolor{subsection in toc}{fg=red} \setbeamersize{text margin left=1em,text margin right=1em} \newenvironment{wideitemize}{\itemize\addtolength{\itemsep}{10pt}}{\enditemize} \title[]{\textcolor{blue}{Macroeconomics: Lecture 6}} \author[SM]{Sumit Mishra} \institute[IFMR]{\small{\begin{tabular}{c} IFMR, Sri City \\ \end{tabular}}} \date{14 October, 2019} %\documentclass[10pt]{beamer} %\input{slideclass.tex} %---------------------------------------------------------------------------------------- % TITLE PAGE %---------------------------------------------------------------------------------------- \begin{document} \section{Introduction} \begin{frame} \titlepage % Print the title page as the first slide \end{frame} %\colorlinks=true %\begin{frame} %\frametitle{Overview} % Table of contents slide, comment this block out to remove it %\tableofcontents % Throughout your presentation, if you choose to use \section{} and \subsection{} commands, these will automatically be printed on this slide as an overview of your presentation %\end{frame} %---------------------------------------------------------------------------------------- % PRESENTATION SLIDES %---------------------------------------------------------------------------------------- %------------------------------------------------ %\section{Preliminaries} %\subsection{Introduction} \begin{frame} \frametitle{Introduction} Why growth matters? \\ Data on infant mortality rates: \begin{itemize} \item 20\% in the poorest 1/5 of all countries \item 0.4\% in the richest 1/5 \item In Malawi, more than 70\% of people live on less than \$2/day. \end{itemize} One-fourth of the poorest countries have had famines during the past 3 decades. \\ Poverty is associated with oppression of women and minorities. \end{frame} \begin{frame} \frametitle{Income and Poverty in the World} \begin{figure}[ht] \centering \makebox[0.55\linewidth][c]{ \includegraphics[clip,width=0.8\linewidth]{graphs/poverty_income_pc.png}} %\caption*{Lemonade Price Is Always Fixed at 0.75c}} \end{figure} \end{frame} \begin{frame} Fact 1: \textbf{There is enormous variation in per capita income across economies. 
The poorest countries have per capita incomes that are less than 5 percent of per capita income in the richest countries.} \vspace{.25in}\noindent Several notes: \begin{itemize} \item Income per capita (or GDP per capita) is not the sole measure of what is good: but it's a useful summary statistic \item Income per capita ignores distribution of income within a country \item Comparing income per capita across countries is not trivial \begin{itemize} \item You have to convert between currencies \item Countries have different relative prices for goods \item What is the ``right'' way to value haircuts, apples, or cars across countries? \end{itemize} \end{itemize} \end{frame} \begin{frame} \frametitle{Rich Countries} %\linespread{1} %\begin{tiny} \makebox[\linewidth][c]{ \begin{tabular}{lccccc}\hline & GDP per capita & GDP per worker & LF Part. Rate & Avg. Growth & Years to \\ Country & 2008 & 2008 & 2008 & 1960-2008 & Double \\ \hline \hline United States & \$43,326 & \$84,771 & 0.51 & 1.6 & 43 \\ Japan & 33,735 & 64,778 & 0.52 & 3.4 & 21 \\ France & 31,980 & 69,910 & 0.46 & 2.2 & 30 \\ United Kingdom & 35,345 & 70,008 & 0.51 & 1.9 & 36 \\ Spain & 28,958 & 57,786 & 0.50 & 2.7 & 26 \\ \hline \end{tabular}} %\end{tiny} \end{frame} \begin{frame} \frametitle{Poor Countries} %\linespread{1} %\begin{small} \makebox[\linewidth][c]{ \begin{tabular}{lccccc}\hline & GDP per capita & GDP per worker & LF Part. Rate & Avg. Growth & Years to \\ Country & 2008 & 2008 & 2008 & 1960-2008 & Double \\ \hline \hline China & 6,415 & 10,938 & 0.59 & 5.6 & 13 \\ India & 3,078 & 7,801 & 0.39 & 3.0 & 24 \\ Nigeria & 1,963 & 6,106 & 0.32 & 0.6 & 114 \\ Uganda & 1,122 & 2,604 & 0.43 & 1.3 & 52 \\ \hline \end{tabular}} %\end{small} \end{frame} \begin{frame} \frametitle{Growth Miracles} \begin{tabular}{lccccc}\hline & GDP per capita & GDP per worker & LF Part. Rate & Avg. Growth & Years to \\ Country & 2008 & 2008 & 2008 & 1960-2008 & Double \\ \hline \hline Hong Kong & 37,834 & 70,940 & 0.53 & 4.3 & 16 \\ Singapore & 49,987 & 92,634 & 0.54 & 4.1 & 17 \\ Taiwan & 29,645 & 62,610 & 0.47 & 5.1 & 14 \\ South Korea & 25,539 & 50,988 & 0.50 & 4.5 & 16 \\ \hline \end{tabular} \end{frame} \begin{frame} \frametitle{Growth Disasters} \begin{tabular}{lccccc}\hline & GDP per capita & GDP per worker & LF Part. Rate & Avg. Growth & Years to \\ Country & 2008 & 2008 & 2008 & 1960-2008 & Double \\ \hline \hline Venezuela & 9,762 & 21,439 & 0.46 & -0.1 & -627 \\ Haiti & 1,403 & 3,164 & 0.44 & -0.4 & -168 \\ Madagascar & 810 & 1,656 & 0.49 & -0.1 & -488 \\ Zimbabwe & 135 & 343 & 0.40 & -1.5 & -47 \\ \hline \end{tabular} \end{frame} \begin{frame} \frametitle{Distribution of Population by GDP per Worker, 2008} \begin{center} \includegraphics[scale=0.8]{graphs/figure_1_1.pdf} \end{center} \end{frame} \begin{frame} \frametitle{World Population by GDP per Worker, 1960 and 2008} \begin{center} \includegraphics[scale=0.8]{graphs/figure_1_2.pdf} \end{center} \end{frame} \begin{frame} \frametitle{Introduction} Fact 2: \textbf{Rates of economic growth vary substantially across countries.} \vspace{.25in}\noindent Notes: \begin{itemize} \item We will try to distinguish whether these are long-term differences or just transitional differences \item If they are long-term, then eventually some countries will be infinitely rich compared to others \item We think most differences are transitional \end{itemize} \end{frame} \begin{frame} Fact 3: \textbf{Growth rates are not generally constant over time. 
For the world as a whole, growth rates were close to zero over most of history but have increased sharply in the twentieth century. For individual countries, growth rates also change over time.}
\vspace{.25in}\noindent Note:
\begin{itemize}
\item The big changes in growth rates over history are from pre-Industrial Revolution (close to 0\% growth) to modern times (roughly 1.85\% growth per year for developed countries)
\item The big changes in growth rates within countries tend to occur as they transition from poor to rich (e.g. Japan or China), after which growth slows down.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{World GDP per Capita Growth Rates}
\begin{center}
\includegraphics[scale=0.8]{graphs/figure_1_3.pdf}
\end{center}
\end{frame}

\begin{frame}
\frametitle{Introduction}
Fact 4: \textbf{A country's relative position in the world distribution of per capita incomes is not immutable. Countries can go from being ``poor'' to being ``rich'', and vice versa.}
\vspace{.25in}\noindent Notes:
\begin{itemize}
\item The ``growth disasters'' in the table were all very well off in 1960 compared to East Asia. Now they are well behind.
\item The ``growth miracles'' in the table were thought, in 1960, to be on the path to starvation and destitution.
\item What are the sources of these movements in rankings?
\end{itemize}
\end{frame}

\begin{frame}
Fact 5: \textbf{Growth in output and growth in the volume of international trade are closely related.}
\vspace{.25in}\noindent Notes:
\begin{itemize}
\item Growth in trade is associated with growth in output, but not necessarily with the level of trade (Japan does not actually trade much, but is rich)
\item Rapid growth in trade is not necessarily just growth in exports from East Asia (China and Korea also import a lot more than they used to)
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Growth in Trade and Growth in Output}
\begin{center}
\includegraphics[scale=0.8]{graphs/figure_1_5.pdf}
\end{center}
\end{frame}

\begin{frame}
Fact 7: \textbf{Both skilled and unskilled workers tend to migrate from poor to rich countries or regions.}
\vspace{.25in}\noindent Notes:
\begin{itemize}
\item Implies that the return to both kinds of labour is higher in developed countries
\item Shouldn't scarcity in poor countries imply a large premium to skilled workers?
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Big Questions}
\textbf{Why are some countries so rich and others so poor?}
\vspace{.25in}\noindent Answers?
\begin{itemize}
\item Level differences
\item Different levels of human capital
\item Different institutions supporting innovation/technology adoption/entrepreneurship
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Big Questions}
\textbf{What is the engine of growth?}
\vspace{.25in}\noindent Answers?
\begin{itemize}
\item Technological progress - new goods, or better versions of old goods
\item Not accumulation of more physical or human capital - those cannot sustain growth
\item Ultimately technological progress will rely on population - more people, more ideas
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Big Question}
\textbf{What creates growth miracles in some countries?}
\vspace{.25in}\noindent Answers?
\begin{itemize}
\item Reversing what made them poor
\item Changing institutions to foster technology adoption (copying?)
\item Changing institutions to create larger markets (trade, internal markets) to support innovation/adoption
\end{itemize}
\end{frame}

\section{Standards of Living: Measurement}

\begin{frame}{PPP Measurement}
\textit{Assume that the average consumer in Russia and the average consumer in the United States buy the quantities and pay the prices indicated in the table below:} \\
\begin{center}
\begin{tabular}{lcccc}
\hline
~ & \multicolumn{2}{c}{Food} & \multicolumn{2}{c}{Non-Food Item} \\
\hline
~ & Price & Quantity & Price & Quantity \\
Russia & 300,000 Roubles & 1 & 40,000 Roubles & 1 \\
United States & \$10,000 & 1 & \$10,000 & 1 \\
\hline
\end{tabular}
\end{center}
A few points:
\begin{itemize}
\item In Russia, an average person buys a car once every 15 years.
\item \$1 = 10 Roubles.
\end{itemize}
It turns out that per-capita consumption in Russia is \$2,000.
\end{frame}

\begin{frame}{PPP Measurement}
\begin{itemize}
\item We can improve upon the calculation we just did.
\item It would be nice if we had some common prices.
\item Let's say we use the American prices.
\item If we use American prices and substitute them into the Russian bundle..\pause
\item .. Russian consumption is $0.07*10,000 + 1*10,000$
\item This type of computation is at the heart of PPP estimates.
\item These estimates use average prices across countries.
\end{itemize}
\end{frame}

\end{document}
{ "alphanum_fraction": 0.7088914316, "avg_line_length": 32.6527472527, "ext": "tex", "hexsha": "58afbf1ef8f5fe24674470a991f0a17dc4e0cc28", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "42b86dc246d4870b28b99ee0b9ddc33f35e077a9", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "mishrasumit09/econ502", "max_forks_repo_path": "slides/Lecture06.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "42b86dc246d4870b28b99ee0b9ddc33f35e077a9", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "mishrasumit09/econ502", "max_issues_repo_path": "slides/Lecture06.tex", "max_line_length": 261, "max_stars_count": null, "max_stars_repo_head_hexsha": "42b86dc246d4870b28b99ee0b9ddc33f35e077a9", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "mishrasumit09/econ502", "max_stars_repo_path": "slides/Lecture06.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 4476, "size": 14857 }
% $Header$

\documentclass{beamer}

% This file is a solution template for:
% - Giving a talk on some subject.
% - The talk is between 15min and 45min long.
% - Style is ornate.

% Copyright 2004 by Till Tantau <[email protected]>.
%
% In principle, this file can be redistributed and/or modified under
% the terms of the GNU Public License, version 2.
%
% However, this file is supposed to be a template to be modified
% for your own needs. For this reason, if you use this file as a
% template and not specifically distribute it as part of another
% package/program, I grant the extra permission to freely copy and
% modify this file as you see fit and even to delete this copyright
% notice.

\mode<presentation>
{
  \usetheme{Pittsburgh}
  \usecolortheme{dove}
  \usefonttheme{professionalfonts}
  \setbeamertemplate{blocks}[rounded][shadow=false]
  \setbeamercolor{block title}{fg=structure,bg=white}
}

\usepackage[english]{babel}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{CJKutf8}
\usepackage{amsfonts}
\usepackage{color}
\usepackage{pstricks,pst-text}
\usepackage{pst-node,pst-tree}
\usepackage{concrete}
\usepackage[T1]{fontenc}
\usepackage{amsmath}%
\usepackage{amsthm}
\usepackage{amssymb}%
\usepackage{graphicx}
\usepackage{float}
\usepackage{caption}
\usepackage{bbold}
% Or whatever. Note that the encoding and the font should match. If T1
% does not look nice, try deleting the line with the fontenc.

\title{Algebraic generating functions for languages avoiding Riordan patterns\footnote{\scriptsize submitted to the \emph{Journal of Integer Sequences} in January and recently reviewed} and\\ matrix functions for the Riordan group}

%\subtitle {Presentation Subtitle} % (optional)

\author[Merlini, Nocentini] % (optional, use only with lots of authors)
{Massimo Nocentini}
% - Use the \inst{?} command only if the authors have different
%   affiliation.

\institute[University of Florence] % (optional, but mostly needed)
{
  Dipartimento di Statistica, Informatica, Applicazioni \\
  University of Florence, Italy
}
% - Use the \inst command only if there are several affiliations.
% - Keep it simple, no one is interested in your street address.

\date{\today}

\subject{ We study the languages $\mathfrak{L}^{[\mathfrak{p}]}\subset \{0,1\}^*$ of binary words $w$ avoiding a given pattern $\mathfrak{p}$ such that $|w|_0\leq |w|_1$ for any $w\in \mathfrak{L}^{[\mathfrak{p}]},$ where $|w|_0$ and $|w|_1$ correspond to the number of bits $0$ and $1$ in the word $w$, respectively. In particular, we concentrate on patterns $\mathfrak{p}$ related to the concept of Riordan arrays. These languages are not regular and can be enumerated by algebraic generating functions corresponding to many integer sequences which are unknown in the OEIS. We give explicit formulas for these generating functions expressed in terms of the autocorrelation polynomial of $\mathfrak{p}$ and also give explicit formulas for the coefficients of some particular patterns, algebraically and combinatorially.}
% This is only inserted into the PDF information catalog. Can be left
% out.
% If you have a file called "university-logo-filename.xxx", where xxx % is a graphic format that can be processed by latex or pdflatex, % resp., then you can add a logo as follows: % \pgfdeclareimage[height=0.5cm]{university-logo}{university-logo-filename} % \logo{\pgfuseimage{university-logo}} % Delete this, if you do not want the table of contents to pop up at % the beginning of each subsection: \AtBeginSection[] { \begin{frame}<beamer>{Outline} \tableofcontents[currentsection] \end{frame} } % If you wish to uncover everything in a step-wise fashion, uncomment % the following command: %\beamerdefaultoverlayspecification{<+->} \begin{document} \begin{frame} \titlepage \end{frame} \begin{frame}{Outline} \tableofcontents % You might wish to add the option [pausesections] \end{frame} % Since this a solution template for a generic talk, very little can % be said about how it should be structured. However, the talk length % of between 15min and 45min and the theme suggest that you stick to % the following rules: % - Exactly two or three sections (other than the summary). % - At *most* three subsections per section. % - Talk about 30s to 2min per frame. So there should be between about % 15 and 30 frames, all told. \section{Introduction} \begin{frame} \frametitle{Definition in terms of $d(t)$ and $h(t)$} \begin{itemize} \item A \emph{Riordan array} $\mathcal{R}$ is a pair $$\mathcal{R}=(d(t),\ h(t))$$ where $d(t)$ and $h(t)$ are formal power series in the undeterminate $t$, such that $d(0)\neq 0$ and $h(0)= 0$ \item if $h^\prime(0)\neq 0$ the Riordan array is called \emph{proper} \item $\mathcal{R}$ denotes an infinite, lower triangular array $(d_{n,k})_{n,k\in N}$ where: $$d_{n,k}=[t^n]d(t)h(t)^k$$ \end{itemize} \end{frame} \section{Binary words avoiding patterns} \begin{frame} \frametitle{Binary words avoiding a pattern} \begin{itemize} \item We consider the language $\mathcal{L}^{[\mathfrak{p}]}$ of binary words with no occurrence of a pattern $\mathfrak{p}=p_0\cdots p_{h-1}$ \item The problem of determining the generating function counting the number of words \emph{with respect to their length} has been studied by several authors: \begin{enumerate} \item L.~J. Guibas and M.~Odlyzko. Long repetitive patterns in random sequences. {\em Zeitschrift f\"{u}r Wahrscheinlichkeitstheorie}, 53:241--262, 1980. \item R.~Sedgewick and P.~Flajolet. {\em An {I}ntroduction to the {A}nalysis of {A}lgorithms}. Addison-Wesley, Reading, MA, 1996. 
\end{enumerate} \item The fundamental notion is that of the \emph{autocorrelation vector} of bits $c=(c_0,\ldots ,c_{h-1})$ associated to a given $\mathfrak{p}$ \end{itemize} \end{frame} \begin{frame} \frametitle{The pattern $\mathfrak{p}=10101$} \begin{center} \begin{tabular}{ccccc|cccccccc} $1$ & $0$ & $1$ & $0$ & $1$ & \multicolumn{6}{l}{Tails} & $c_{i}$ \\ \hline $1$ & $0$ & $1$ & $0$ & $1$ & & & & & & & $1$ \\ & $1$ & $0$ & $1$ & $0$ & $1$ & & & & & & $0$ \\ & & $1$ & $0$ & $1$ & $0$ & $1$ & & & & & $1$ \\ & & & $1$ & $0$ & $1$ & $0$ & $1$ & & & & $0$ \\ & & & & $1$ & $0$ & $1$ & $0$ & $1$ & & & $1$\\ \end{tabular} \end{center} The autocorrelation vector is then $c=(1,0,1,0,1)$ and $C^{[\mathfrak{p}]}(t)=1+t^{2}+t^{4}$ is the associated autocorrelation polynomial \end{frame} \begin{frame} \frametitle{Count respect bits $1$ and $0$} The gf counting the number $F_{n}$ of binary words with length $n$ not containing the pattern $\mathfrak{p}$ is \begin{displaymath} F(t) = \frac{C^{[\mathfrak{p}]}(t)}{t^{h} + (1-2t)C^{[\mathfrak{p}]}(t)} \end{displaymath} Taking into account the number of bits $1$ and $0$ in $\mathfrak{p}$: \begin{displaymath} F^{[\mathfrak{p}]}(x, y) = \frac{C^{[\mathfrak{p}]}(x,y)}{x^{n_{1}^{[\mathfrak{p}]}} y^{n_{0}^{[\mathfrak{p}]}} + (1-x-y)C^{[\mathfrak{p}]}(x, y)} \end{displaymath} where $h = {n_{0}^{[\mathfrak{p}]}}+{n_{1}^{[\mathfrak{p}]}}$ and $C^{[\mathfrak{p}]}(x, y)$ is the bivariate autocorrelation polynomial. Moreover, $F_{n, k}^{[\mathfrak{p}]} =[x^n y^k]F^{[\mathfrak{p}]}(x,y)$ denotes the number of binary words avoiding the pattern $\mathfrak{p}$ with $n$ bits $1$ and $k$ bits $0$ \end{frame} \begin{frame}\frametitle{An example with $\mathfrak{p}= 10101$} Since $C^{[\mathfrak{p}]}(x,y)=1+xy+x^2y^2$ we have: $$F^{[\mathfrak{p}]}(x,y)={1+xy+x^2y^2 \over (1-x-y)(1+xy+x^2y^2)+x^3y^2}. 
$$ $$ \begin{array}{c|cccccccc} n/k & 0 & 1 & 2 & 3 & 4 &5 &6 &7 \\ \cline{1-9} 0 & {\bf\color{red} 1} & {\bf\color{green} 1} & {\bf\color{green}1} & {\bf\color{green}1}& {\bf\color{green}1} & {\bf\color{green}1} & {\bf\color{green}1} & {\bf\color{green}1}\\ 1 & {\bf\color{blue} 1} & {\bf\color{red}2} & 3 & 4 & 5 & 6 & 7 & 8 \\ 2 & {\bf\color{blue} 1} & 3 & {\bf\color{red}6} & 10 & 15 & 21 & 28 & 36\\ 3 & {\bf\color{blue} 1} & 4 & 9 & {\bf\color{red}18} & 32 & 52& 79 & 114\\ 4 & {\bf\color{blue} 1} & 5 & 13 & 30 & {\bf\color{red}60} & 109& 184 & 293\\ 5 & {\bf\color{blue} 1} & 6 & 18 & 46 & 102 & {\bf\color{red}204} & 377 & 654\\ 6 & {\bf\color{blue} 1} & 7 & 24 & 67 & 163 & 354& {\bf\color{red}708} & 1324\\ 7 & {\bf\color{blue} 1} & 8 & 31 & 94 & 248 &580& 1245 &{\bf\color{red}2490} \end{array} $$ \end{frame} \begin{frame}\frametitle{...the lower and upper triangular parts} \begin{columns} \begin{column}{5cm} $$ \begin{array}{c|cccccc} n/k & 0 & 1 & 2 & 3 & 4 &5 \\ \cline{1-7} 0 & {\bf\color{red}1} & & & & & \\ 1 & {\bf\color{red}2} & {\bf\color{blue} 1} & & & & \\ 2 & {\bf\color{red}6}& 3 & {\bf\color{blue} 1} & & & \\ 3 & {\bf\color{red}18} & 9 & 4 & {\bf\color{blue} 1} & & \\ 4 & {\bf\color{red}60} & 30 & 13 & 5 & {\bf\color{blue} 1}& \\ 5 & {\bf\color{red}204} & 102 & 46 & 18 & 6 &{\bf\color{blue} 1} \\ \end{array} $$ $(n,k)\mapsto(n,n-k)$ if $k\leq n$ \end{column} \begin{column}{5cm} $$ \begin{array}{c|cccccc} n/k & 0 & 1 & 2 & 3 & 4 &5 \\ \cline{1-7} 0 & {\bf\color{red}1} & & & & & \\ 1 & {\bf\color{red}2} & {\bf\color{green}1} & & & & \\ 2 & {\bf\color{red}6}& 3 & {\bf\color{green}1} & & & \\ 3 & {\bf\color{red}18} & 10 & 4 & {\bf\color{green}1} & & \\ 4 & {\bf\color{red}60} & 32 & 15 & 5 & {\bf\color{green}1}& \\ 5 & {\bf\color{red}204} & 109 & 52 & 21 & 6 &{\bf\color{green}1} \\ \end{array} $$ $(n,k)\mapsto(k,k-n)$ if $n\leq k$ \end{column} \end{columns} \end{frame} \begin{frame}\frametitle{Matrices ${{R}^{[\mathfrak{p}]}}$ and ${{R}^{[\bar{\mathfrak{p}]}}}$} \begin{itemize} \item Let $R_{n,k}^{[\mathfrak{p}]}=F_{n,n-k}^{[\mathfrak{p}]}$ with $k\leq n.$ In other words, $R_{n,k}^{[\mathfrak{p}]}$ counts the number of words avoiding $\mathfrak{p}$ with $n$ bits $1$ and $n-k$ bits $0$ \item Let $\bar{\mathfrak{p}}=\bar{p}_{0}\ldots\bar{p}_{h-1}$ be the $\mathfrak{p}$'s conjugate, where $\bar{p}_{i} = 1-p_{i}$ \item We obviously have $R_{n,k}^{[\bar{\mathfrak{p}}]}=F_{n,n-k}^{[\bar{\mathfrak{p}}]}=F_{k,k-n}^{[\mathfrak{p}]}$. Therefore, the matrices ${{R}^{[\mathfrak{p}]}}$ and ${{R}^{[\bar{\mathfrak{p}]}}}$ represent the lower and upper triangular part of the array ${{F}^{[\mathfrak{p}]}},$ respectively \end{itemize} \end{frame} \section{Riordan patterns} \begin{frame}\frametitle{Riordan patterns {\tiny [MS11]}} \begin{itemize} \item When matrices ${{R}^{[\mathfrak{p}]}}$ and ${{R}^{[\bar{\mathfrak{p}]}}}$ are (both) Riordan arrays? \item We say that $\mathfrak{p}=p_0...p_{h-1}$ is a Riordan pattern if and only if $$C^{[\mathfrak{p}]}(x,y)=C^{[\mathfrak{p}]}(y,x)= \sum_{i=0}^{\lfloor(h-1)/2\rfloor}c_{2i}x^iy^i$$ provided that $\left|n_1^{[\mathfrak{p}]}-n_0^{[\mathfrak{p}]}\right|\in \left\{0,1\right\}$ \end{itemize} \begin{enumerate} \item {\small D. Merlini and R. Sprugnoli. Algebraic aspects of some Riordan arrays related to binary words avoiding a pattern. 
{\em Theoretical Computer Science}, 412 (27), 2988-3001, 2011.} \end{enumerate} \end{frame} %\begin{frame}\frametitle{Theorem 1} %The matrices ${\cal{R}^{[\mathfrak{p}]}}$ and %${\cal{R}^{[\bar{\mathfrak{p}]}}}$ are both Riordan arrays %${\cal{R}^{[\mathfrak{p}]}}=(d^{[\mathfrak{p}]}(t),h^{[\mathfrak{p}]}(t))$ % and ${\cal{R}^{[\bar{\mathfrak{p}]}}}=(d^{[\bar{\mathfrak{p}}]}(t),h^{[\bar{\mathfrak{p}}]}(t))$ %if and only if $\mathfrak{p}$ is a Riordan pattern. Moreover we have: %\begin{alertblock}{ } %$$d^{[\mathfrak{p}]}(t)=d^{[\bar{\mathfrak{p}}]}(t)= %[x^0]F\left(x,\dfrac{t}{x}\right)=\dfrac{1}{2\pi i}\displaystyle\oint{F\left(x,\dfrac{t}{x}\right)\dfrac{dx}{x}} %$$ %and {\tiny $$h^{[\mathfrak{p}]}(t)={1- \sum_{i = %0}^{n_1^{\mathfrak{p}}-1} \alpha_{i,1}t^{i+1}- %\sqrt{(1-\sum_{i=0}^{n_1^{\mathfrak{p}}-1} \alpha_{i,1}t^{i+1})^2 %- 4\sum_{i = 0}^{n_1^{\mathfrak{p}}-1} \alpha_{i,0}t^{i+1}(\sum_{i %= 0}^{n_1^{\mathfrak{p}}-1} \alpha_{i,2}t^{i+1}+1)} \over %2(\sum_{i = 0}^{n_1^{\mathfrak{p}}-1} \alpha_{i,2}t^{i+1}+1)} $$} %\end{alertblock} %\end{frame} \begin{frame}\frametitle{Theorem 1} Matrices \begin{displaymath} {{R}^{[\mathfrak{p}]}}=(d^{[\mathfrak{p}]}(t),h^{[\mathfrak{p}]}(t)), \quad {{R}^{[\bar{\mathfrak{p}]}}}=(d^{[\bar{\mathfrak{p}}]}(t),h^{[\bar{\mathfrak{p}}]}(t)) \end{displaymath} are both RAs $\leftrightarrow$ $\mathfrak{p}$ is a Riordan pattern. \begin{block}{} By specializing this result to the cases $\left|n_1^{[\mathfrak{p}]}-n_0^{[\mathfrak{p}]}\right|\in \{0,1\}$ and by setting $C^{[\mathfrak{p}]}(t)=C^{[\mathfrak{p}]}\left(\sqrt{t},\sqrt{t}\right)=\sum_{i \geq 0}c_{2i}t^i,$ we have explicit formulae for both functions $d(t)$ and $h(t)$ wrt polynomial $C^{[\mathfrak{p}]}(t)$. \end{block} \end{frame} \begin{frame}\frametitle{Theorem 1: the case $n_1^{[\mathfrak{p}]}-n_0^{[\mathfrak{p}]}=1$} $$d^{[\mathfrak{p}]}(t)={C^{[\mathfrak{p}]}(t) \over \sqrt{C^{[\mathfrak{p}]}(t)^2-4tC^{[\mathfrak{p}]}(t)(C^{[\mathfrak{p}]}(t)-t^{n_0^{\mathfrak{p}}})}}, $$ $$h^{[\mathfrak{p}]}(t)={C^{[\mathfrak{p}]}(t) -\sqrt{C^{[\mathfrak{p}]}(t)^2-4tC^{[\mathfrak{p}]}(t)(C^{[\mathfrak{p}]}(t)-t^{n_0^{\mathfrak{p}}})} \over 2 C^{[\mathfrak{p}]}(t)}.$$ \end{frame} \begin{frame}\frametitle{Theorem 1: the case $n_1^{[\mathfrak{p}]}-n_0^{[\mathfrak{p}]}=0$} $$d^{[\mathfrak{p}]}(t)={C^{[\mathfrak{p}]}(t) \over \sqrt{( C^{[\mathfrak{p}]}(t)+t^{n_0^{\mathfrak{p}}})^2-4tC^{[\mathfrak{p}]}(t)^2}}, $$ $$h^{[\mathfrak{p}]}(t)= {C^{[\mathfrak{p}]}(t)+ t^{n_0^{\mathfrak{p}}} - \sqrt{( C^{[\mathfrak{p}]}(t)+t^{n_0^{\mathfrak{p}}})^2-4tC^{[\mathfrak{p}]}(t)^2} \over 2 C^{[\mathfrak{p}]}(t)}.$$ \end{frame} \begin{frame}\frametitle{Theorem 1: the case $n_0^{[\mathfrak{p}]}-n_1^{[\mathfrak{p}]}=1$} $$d^{[\mathfrak{p}]}(t)={C^{[\mathfrak{p}]}(t) \over \sqrt{C^{[\mathfrak{p}]}(t)^2-4tC^{[\mathfrak{p}]}(t)(C^{[\mathfrak{p}]}(t)-t^{n_1^{\mathfrak{p}}})}}, $$ $$h^{[\mathfrak{p}]}(t)={C^{[\mathfrak{p}]}(t) -\sqrt{C^{[\mathfrak{p}]}(t)^2-4tC^{[\mathfrak{p}]}(t)(C^{[\mathfrak{p}]}(t)-t^{n_1^{\mathfrak{p}}})} \over 2 (C^{[\mathfrak{p}]}(t)- t^{n_1^{\mathfrak{p}}})}.$$ \end{frame} %\begin{frame}\frametitle{Theorem 1 -b-} %... 
where $\delta_{i,j}$ is the Kronecker delta, %$$\sum_{i = 0}^{n_1^{\mathfrak{p}}-1} \alpha_{i,0}t^{i}=\sum_{i = 0}^{n_1^{\mathfrak{p}}-1} c_{2i}t^{i}- %\delta_{-1,n_0^{\mathfrak{p}}-n_1^{\mathfrak{p}}}t^{n_1^{\mathfrak{p}}-1}, $$ %$$\sum_{i = 0}^{n_1^{\mathfrak{p}}-1} \alpha_{i,1}t^{i}=-\sum_{i = 0}^{n_1^{\mathfrak{p}}-1} c_{2(i+1)}t^{i}- %\delta_{0,n_0^{\mathfrak{p}}-n_1^{\mathfrak{p}}}t^{n_1^{\mathfrak{p}}-1}, $$ %$$\sum_{i = 0}^{n_1^{\mathfrak{p}}-1} \alpha_{i,2}t^{i}=\sum_{i = 0}^{n_1^{\mathfrak{p}}-1} c_{2(i+1)}t^{i}- %\delta_{1,n_0^{\mathfrak{p}}-n_1^{\mathfrak{p}}}t^{n_1^{\mathfrak{p}}-1},$$ and the coefficients $c_i$ are given by the autocorrelation vector %of $\mathfrak{p}.$ % An analogous formula holds for $h^{[\bar{\mathfrak{p}}]}(t)$. %\end{frame} \begin{frame}\frametitle{Classes of patterns} \begin{itemize} \item $\mathfrak{p}=1^{j+1}0^j$ \item $\mathfrak{p}=0^{j+1}1^j$ \item $\mathfrak{p}=1^{j}0^j$ and $\mathfrak{p}=0^{j}1^j$ \item $\mathfrak{p}=(10)^j1$ \item $\mathfrak{p}=(01)^j0$ \end{itemize} \end{frame} \begin{frame}\frametitle{A combinatorial interpretation for $\mathfrak{p}=10$} In this case we get the RA ${\mathcal{R}^{[10]}} = \left(d^{[10]}(t), h^{[10]}(t)\right)$ such that \begin{displaymath} d^{[10]}(t)=\frac{1}{1-t} \quad \text{and} \quad h^{[10]}(t) = t, \end{displaymath} so the number $R_{n, 0}^{[10]}$ of words containing $n$ bits $1$ and $n$ bits $0$, avoiding pattern $\mathfrak{p}=10$, is $[t^{n}] d^{[10]}(t) = 1$ for $n\in\mathbb{N}$. In terms of lattice paths this corresponds to the fact that there is exactly one \emph{valley}-shaped path having $n$ steps of both kinds $\diagup$ and $\diagdown$, avoiding $\mathfrak{p}=10$ and terminating at coordinate $(2n, 0)$ for each $n\in\mathbb{N}$, formally the path $0^{n}1^{n}$. \end{frame} \iffalse \begin{frame}\frametitle{A Lemma} Let $\mathfrak{p}$ be a Riordan pattern. 
Then the Riordan array ${{R}^{[\mathfrak{p}]}}$ is characterized by the $A$-matrix defined by the following relation: $$R_{n+1,k+1}^{[\mathfrak{p}]}=R_{n,k}^{[\mathfrak{p}]} +R_{n+1,k+2}^{[\mathfrak{p}]}-R_{n+1-n_1^{\mathfrak{p}},k+1+n_0^{\mathfrak{p}}-n_1^{\mathfrak{p}}}^{[\mathfrak{p}]} +$$ $$- \sum_{i\geq 1} c_{2i}\left( R_{n+1-i,k+1}^{[\mathfrak{p}]} -R_{n-i,k}^{[\mathfrak{p}]} -R_{n+1-i,k+2}^{[\mathfrak{p}]} \right),$$ where the $c_i$ are given by the autocorrelation vector of $\mathfrak{p}.$ \end{frame} \fi \section{The $|w|_{0}\leq |w|_{1}$ constraint} \begin{frame}\frametitle{The $|w|_{0}\leq |w|_{1}$ constraint} \begin{itemize} \item let $|w|_{i}$ be the number of bits $i$ in word $w$ \item enumeration of binary words avoiding a pattern $\mathfrak{p}$, without the constraint $|w|_0\leq |w|_1,$ gives a {\bf \red rational} bivariate generating function for the sequence $F^{[\mathfrak{p}]}_n=\sum_{k=0}^nF_{n,k}^{[\mathfrak{p}]}$ \item under the restriction such that words have to have no more bits $0$ than bits $1$, then the language is no longer regular and its enumeration becomes more difficult \item using gf $R^{[\mathfrak{p}]}(x,y)$ and the fundamental theorem of RAs: $$\sum_{k=0}^n d_{n,k}f_k=[t^n]d(t)f(h(t)) $$ we obtain many {\bf \red new algebraic generating functions} expressed in terms of the autocorrelation polynomial of $\mathfrak{p}$ \end{itemize} \end{frame} \begin{frame}\frametitle{Theorem 2: the case $n_1^{[\mathfrak{p}]}-n_0^{[\mathfrak{p}]}=1$} Recall that \begin{displaymath} R^{[\mathfrak{p}]}(t,w)=\sum_{n,k\in\mathbb{N}} R_{n, k}^{[\mathfrak{p}]}t^n w^k={d^{[\mathfrak{p}]}(t) \over 1-wh^{[\mathfrak{p}]}(t)} \end{displaymath} Let $S^{[\mathfrak{p}]}(t)=\sum_{n\geq 0}S_n^{[\mathfrak{p}]}t^n$ be the gf enumerating the set of binary words $\left\lbrace w\in\mathcal{L}^{[\mathfrak{p}]} : |w|_0\leq |w|_1\right\rbrace$ according to {\bf \red the number of bits $1$} \begin{itemize} \item if $n_1^{[\mathfrak{p}]}=n_0^{[\mathfrak{p}]}+1:$ $$S^{[\mathfrak{p}]}(t)={2C^{[\mathfrak{p}]}(t) \over \sqrt{Q(t)}\left(\sqrt{C^{[\mathfrak{p}]}(t)}+ \sqrt{Q(t)} \right)} $$ where $Q(t)={(1-4t)C^{[\mathfrak{p}]}(t)^2+4t^{n_1^{[\mathfrak{p}]}}}$ \end{itemize} \end{frame} \begin{frame}\frametitle{Theorem 2: the case $n_0^{[\mathfrak{p}]}-n_1^{[\mathfrak{p}]}=1$} \begin{itemize} \item if $n_0^{[\mathfrak{p}]}=n_1^{[\mathfrak{p}]}+1:$ $$S^{[\mathfrak{p}]}(t)={2C^{[\mathfrak{p}]}(t)(C^{[\mathfrak{p}]}(t)-t^{n_1^{[\mathfrak{p}]}} ) \over \sqrt{Q(t)} \left(C^{[\mathfrak{p}]}(t)-2t^{n_1^{[\mathfrak{p}]}}+ \sqrt{Q(t)} \right) }$$ where $Q(t)={ (1-4t)C^{[\mathfrak{p}]}(t)^2+4t^{n_0^{[\mathfrak{p}]}}C^{[\mathfrak{p}]}(t)}$ \end{itemize} \end{frame} \begin{frame}\frametitle{Theorem 2: the case $n_0^{[\mathfrak{p}]}-n_1^{[\mathfrak{p}]}=0$} \begin{itemize} \item if $n_1^{[\mathfrak{p}]}=n_0^{[\mathfrak{p}]}:$ $$S^{[\mathfrak{p}]}(t)={2C^{[\mathfrak{p}]}(t)^2 \over \sqrt{Q(t)} \left(C^{[\mathfrak{p}]}(t)-t^{n_0^{[\mathfrak{p}]}}+ \sqrt{Q(t)} \right) }$$ where $Q(t)=(1-4t)C^{[\mathfrak{p}]}(t)^2+2t^{n_0^{[\mathfrak{p}]}}C^{[\mathfrak{p}]}(t)+t^{2n_0^{[\mathfrak{p}]}}$ \end{itemize} \begin{proof} Observe that $S^{[\mathfrak{p}]}(t)=R^{[\mathfrak{p}]}(t,1),$ or, equivalently, that $S_n^{[\mathfrak{p}]}=\sum_{k=0}^nR_{n, k}^{[\mathfrak{p}]}$ and apply the fundamental rule with $f_k=1$. 
\end{proof} \end{frame} \begin{frame}\frametitle{Theorem 3: the case $n_1^{[\mathfrak{p}]}-n_0^{[\mathfrak{p}]}=1$} Let $L^{[\mathfrak{p}]}(t)=\sum_{n\geq 0}L_n^{[\mathfrak{p}]}t^n$ be the gf enumerating the set of binary words $\left\lbrace w\in\mathcal{L}^{[\mathfrak{p}]} : |w|_0\leq |w|_1\right\rbrace$ according to {\bf \red the length} \begin{itemize} \item if $n_1^{[\mathfrak{p}]}=n_0^{[\mathfrak{p}]}+1:$ $$L^{[\mathfrak{p}]}(t)= {2tC^{[\mathfrak{p}]}(t^2)^2 \over \sqrt{Q(t)}\left((2t-1)C(t^2)+ \sqrt{ Q(t) } \right)}$$ where $Q(t)=C^{[\mathfrak{p}]}(t^2)\left( (1-4t^2)C^{[\mathfrak{p}]}(t^2)+4t^{2n_1^{[\mathfrak{p}]}}\right)$ \end{itemize} \end{frame} \begin{frame}\frametitle{Theorem 3: the case $n_0^{[\mathfrak{p}]}-n_1^{[\mathfrak{p}]}=1$} \begin{itemize} \item if $n_0^{[\mathfrak{p}]}=n_1^{[\mathfrak{p}]}+1:$ $$L^{[\mathfrak{p}]}(t)={2t\sqrt{C^{[\mathfrak{p}]}(t^2)}(t^{2n_1^{[\mathfrak{p}]}}-C^{[\mathfrak{p}]}(t^2)) \over \sqrt{ Q(t) }\left((1-2t)C^{[\mathfrak{p}]}(t^2)+ B(t) - \sqrt{C^{[\mathfrak{p}]}(t^2) Q(t) } \right)}$$ where $Q(t)=(1-4t^2)C^{[\mathfrak{p}]}(t^2)+4t^{2n_0^{[\mathfrak{p}]}}$ and $B(t)=2t^{n_0^{[\mathfrak{p}]} +n_1^{[\mathfrak{p}]}}$ \end{itemize} \end{frame} \begin{frame}\frametitle{Theorem 3: the case $n_1^{[\mathfrak{p}]}-n_0^{[\mathfrak{p}]}=0$} \begin{itemize} \item if $n_1^{[\mathfrak{p}]}=n_0^{[\mathfrak{p}]}:$ $$L^{[\mathfrak{p}]}(t)= {2tC^{[\mathfrak{p}]}(t^2)^2 \over \sqrt{ Q(t) }\left((2t-1)C(t^2)-t^{2n_0^{[\mathfrak{p}]}} + \sqrt{ Q(t) } \right)}$$ where $Q(t)=(1-4t^2)C^{[\mathfrak{p}]}(t^2)^2+2t^{2n_0^{[\mathfrak{p}]}}C^{[\mathfrak{p}]}(t^2)+t^{4n_0^{[\mathfrak{p}]}}$ \end{itemize} \end{frame} \begin{frame}\frametitle{Theorem 3: proof} \begin{proof} Observe that the application of generating function $R^{[\mathfrak{p}]}(t, w)$ as \begin{displaymath} R^{[\mathfrak{p}]}\left(tw,{1 \over w}\right)=\sum_{n,k\in\mathbb{N}} R_{n, k}^{[\mathfrak{p}]}t^n w^{n-k} \end{displaymath} entails that $[t^{r}w^{s}]R^{[\mathfrak{p}]}\left(tw,{1 \over w}\right)=R_{r, r-s}^{[\mathfrak{p}]}$ which is the number of binary words with $r$ bits $1$ and $s$ bits $0$. 
To enumerate according to the length let $t=w$, therefore $$L^{[\mathfrak{p}]}(t)=\sum_{n\geq 0}L_n^{[\mathfrak{p}]}t^n=R^{[\mathfrak{p}]}\left(t^2,\frac{1}{t}\right)$$ \end{proof} \end{frame} \section{Series developments and closed formulae} \begin{frame}\frametitle{Series development for $S^{[1^{j+1}0^{j}]}(t)$} {\tiny \begin{table} \begin{equation*}\begin{array}{c|cccccccccccc}j/n & 0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10 & 11\\\hline0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\1 & 1 & 3 & 7 & 15 & 31 & 63 & 127 & 255 & 511 & 1023 & 2047 & 4095\\2 & 1 & 3 & 10 & 32 & 106 & 357 & 1222 & 4230 & 14770 & 51918 & 183472 & 651191\\3 & 1 & 3 & 10 & 35 & 123 & 442 & 1611 & 5931 & 22010 & 82187 & 308427 & 1162218\\4 & 1 & 3 & 10 & 35 & 126 & 459 & 1696 & 6330 & 23806 & 90068 & 342430 & 1307138\\5 & 1 & 3 & 10 & 35 & 126 & 462 & 1713 & 6415 & 24205 & 91874 & 350406 & 1341782\\6 & 1 & 3 & 10 & 35 & 126 & 462 & 1716 & 6432 & 24290 & 92273 & 352212 & 1349768\\7 & 1 & 3 & 10 & 35 & 126 & 462 & 1716 & 6435 & 24307 & 92358 & 352611 & 1351574\\8 & 1 & 3 & 10 & 35 & 126 & 462 & 1716 & 6435 & 24310 & 92375 & 352696 & 1351973\end{array}\end{equation*} \begin{displaymath} \begin{split} [t^{3}]S^{[110]}(t) &= \big|\lbrace 111, 0111, 1011, 00111, 01011, 10011, 10101, 000111, \\ & 001011, 010011, 010101, 100011, 100101, 101001, 101010\rbrace\big| = 15 \end{split} \end{displaymath} \caption{Some series developments for $S^{[1^{j+1}0^j]}(t)$ and the set of words with $n=3$ bits $1$, avoiding pattern $\mathfrak{p}=110$, so $j=1$ in the family; moreover, for $j=1$ the sequence corresponds to $A000225$, for $j=2$ the sequence corresponds to $A261058$.} \end{table} } \end{frame} \begin{frame}\frametitle{Series development for $L^{[1^{j+1}0^{j}]}(t)$} {\tiny \begin{table} \begin{equation*}\begin{array}{c|ccccccccccccccc}j/n & 0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10 & 11 & 12 & 13 & 14\\\hline0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\1 & 1 & 1 & 3 & 3 & 7 & 7 & 15 & 15 & 31 & 31 & 63 & 63 & 127 & 127 & 255\\2 & 1 & 1 & 3 & 4 & 11 & 15 & 38 & 55 & 135 & 201 & 483 & 736 & 1742 & 2699 & 6313\\3 & 1 & 1 & 3 & 4 & 11 & 16 & 42 & 63 & 159 & 247 & 610 & 969 & 2354 & 3802 & 9117\\4 & 1 & 1 & 3 & 4 & 11 & 16 & 42 & 64 & 163 & 255 & 634 & 1015 & 2482 & 4041 & 9752\\5 & 1 & 1 & 3 & 4 & 11 & 16 & 42 & 64 & 163 & 256 & 638 & 1023 & 2506 & 4087 & 9880\\6 & 1 & 1 & 3 & 4 & 11 & 16 & 42 & 64 & 163 & 256 & 638 & 1024 & 2510 & 4095 & 9904\\7 & 1 & 1 & 3 & 4 & 11 & 16 & 42 & 64 & 163 & 256 & 638 & 1024 & 2510 & 4096 & 9908\end{array}\end{equation*} \caption{Some series developments for $L^{[1^{j+1}0^j]}(t)$; moreover, for $j=1$ the sequence corresponds to $A052551$.} \end{table} } \end{frame} \begin{frame}\frametitle{Closed formulae for particular cases} When the parameter $j$ for a pattern $\mathfrak{p}$ assumes values $0$ and $1$ it is possible to find closed formulae for coefficients $S_{n}^{[\mathfrak{p}]}$ and $L_{n}^{[\mathfrak{p}]}$; moreover, in a recent submitted paper we give combinatorial interpretations, in terms of inversions in words and boxes occupancy, too. \begin{block}{$S_{n}^{[\mathfrak{p}]}$} \begin{displaymath} \begin{array}{c|ccc} j/\mathfrak{p} & {1^{j+1}0^{j}} & {0^{j+1}1^{j}} & {1^{j}0^{j}} \\ \hline 0 & [\![n = 0]\!] 
& 1 & { {2n+1}\choose{n} } \\ 1 & 2^{n+1} -1 & (n+2)2^{n-1} & n+1 \\ \end{array}{} \end{displaymath} \end{block} \end{frame} \begin{frame}\frametitle{Closed formulae for particular cases} \begin{block}{$L_{2m}^{[\mathfrak{p}]}$} \begin{displaymath} \begin{array}{c|ccc} j/\mathfrak{p} & {1^{j+1}0^{j}} & {0^{j+1}1^{j}} & {1^{j}0^{j}} \\ \hline 0 & [\![n = 0]\!] & 1 & 2^{2m-1} + \frac{1}{2}{ {2m}\choose{m} } \\ 1 & 2^{m+1} -1 & F_{2m+3}-2^{m} & m+1 \\ \end{array}{} \end{displaymath} \end{block} \begin{block}{$L_{2m+1}^{[\mathfrak{p}]}$} \begin{displaymath} \begin{array}{c|ccc} j/\mathfrak{p} & {1^{j+1}0^{j}} & {0^{j+1}1^{j}} & {1^{j}0^{j}} \\ \hline 0 & 0 & 1 & 2^{2m-1} \\ 1 & 2^{m+1} -1 & F_{2m+3}-2^{m+1} & m+1 \\ \end{array}{} \end{displaymath} \end{block} \end{frame} \begin{frame}{Summary} % Keep the summary *very short*. \begin{block}{Key points} \begin{itemize} \item split $F(t)$ in $F^{[\mathfrak{p}]}(x,y)$ to account for bits $1$ and $0$ \item ${{R}^{[\mathfrak{p}]}}$ and ${{R}^{[\bar{\mathfrak{p}]}}}$ are both RA $\leftrightarrow$ $\mathfrak{p}$ is a Riordan pattern. \item requiring $|w|_{0}\leq|w|_{1}$ entails \begin{displaymath} \begin{split} S^{[\mathfrak{p}]}(t)&=R^{[\mathfrak{p}]}(t,1)\rightarrow [t^{n}]S^{[\mathfrak{p}]}(t)= \left|\left\lbrace w \in \mathcal{L}^{[\mathfrak{p}]}: \begin{array}{l} |w|_{1} = n \\ |w|_{0}\leq|w|_{1} \end{array}\right\rbrace\right|\\ L^{[\mathfrak{p}]}(t)&=R^{[\mathfrak{p}]}\left(t^2,\frac{1}{t}\right)\rightarrow [t^{n}]L^{[\mathfrak{p}]}(t)= \left|\left\lbrace w \in \mathcal{L}^{[\mathfrak{p}]}: \begin{array}{l} |w| = n \\ |w|_{0}\leq|w|_{1} \end{array}\right\rbrace\right|\\ \end{split} \end{displaymath} \end{itemize} \end{block} \end{frame} \section{Matrices functions} \begin{frame}{Defs} Let $A\in\mathbb{C}^{m\times m}$ be a matrix and denote with $\sigma$ the spectre of $A$, formally $\sigma(A) = \lbrace \lambda_{i}: 1\leq i\leq \nu\rbrace$, with multiplicities $\lbrace m_{i}: 1\leq i\leq \nu\rbrace$, respectively, such that $\sum_{i=1}^{\nu}{m_{i}}=m$ \vfill We say that function $f$ \emph{is defined on the spectre $\sigma$ of matrix $A$} if exists $\left. \frac{\partial^{(j)}{f}}{\partial{z}} \right|_{z=\lambda_{i}}$ for $i\in \lbrace 1, \ldots, \nu \rbrace$, for $j \in \lbrace 0, \ldots, m_{i}-1 \rbrace$. \vfill Given a function $f$ defined on the spectre of a matrix $A$, define the polynomial $g$ such that it takes the same values of $f$ on the spectre of $A$, formally: $\left. \frac{\partial^{(j)}{f}}{\partial{z}} \right|_{z=\lambda_{i}} = \left. \frac{\partial^{(j)}{g}}{\partial{z}} \right|_{z=\lambda_{i}}$ for $i\in \lbrace 1, \ldots, \nu \rbrace$, for $j \in \lbrace 0, \ldots, m_{i}-1 \rbrace$, then $f(A) = g(A)$. \end{frame} \begin{frame}{Defs} \vfill Polynomial $g$ is an \emph{interpolating Hermite polynomial} which can be defined using the generalized Lagrange base $ \lbrace \Phi_{ij} \rbrace$, formally: \begin{displaymath} g(z) = \sum_{i=1}^{\nu}{\sum_{j=1}^{m_{i}}{ \left. \frac{\partial^{(j-1)}{f}}{\partial{z}} \right|_{z=\lambda_{i}}\Phi_{ij}(z) }} \end{displaymath} where each polynomial $\Phi_{ij}$, of degree $m-1$, is defined implicitly as the solution of the system: \begin{displaymath} \left. \frac{\partial^{(r-1)}{\Phi_{ij}}}{\partial{z}} \right|_{z=\lambda_{l}} = \delta_{il}\delta_{jr} \end{displaymath} for $l\in \lbrace 1, \ldots, \nu \rbrace$, for $r \in \lbrace 1, \ldots, m_{l} \rbrace$, where $\delta$ is the Kroneker delta, defined as $\delta_{ij}=1 \leftrightarrow i=j$. 
\end{frame} \begin{frame}{Defs} Let $\mathcal{R}_{m}\in\mathbb{C}^{m\times m}$ be a \emph{finite Riordan matrix}, hence $\sigma(\mathcal{R}_{m})= \lbrace \lambda_{1} \rbrace$, so $\nu=1$, where $\lambda_{1}=1$ with multiplicity $m_{1}=m$. \vfill Therefore, the generalized Lagrange base is composed of polynomials \begin{displaymath} \Phi_{1j}(z) = \sum_{k=0}^{j-1}{\frac{(-1)^{j-1-k}}{(j-1-k)!}\frac{z^{k}}{k!}} \end{displaymath} \vfill For the sake of clarity we show polynomials $\Phi_{1j}$ for $j\in \lbrace 1,\ldots,m \rbrace$ relative to any finite Riordan matrix $\mathcal{R}_{m}$ where $m=8$: {\tiny \begin{displaymath} \begin{array}{c} \Phi_{ 1, 1 }{\left (z \right )} = 1\\ \Phi_{ 1, 2 }{\left (z \right )} = z - 1\\ \Phi_{ 1, 3 }{\left (z \right )} = \frac{z^{2}}{2} - z + \frac{1}{2}\\ \Phi_{ 1, 4 }{\left (z \right )} = \frac{z^{3}}{6} - \frac{z^{2}}{2} + \frac{z}{2} - \frac{1}{6}\\ \Phi_{ 1, 5 }{\left (z \right )} = \frac{z^{4}}{24} - \frac{z^{3}}{6} + \frac{z^{2}}{4} - \frac{z}{6} + \frac{1}{24}\\ \Phi_{ 1, 6 }{\left (z \right )} = \frac{z^{5}}{120} - \frac{z^{4}}{24} + \frac{z^{3}}{12} - \frac{z^{2}}{12} + \frac{z}{24} - \frac{1}{120}\\ \Phi_{ 1, 7 }{\left (z \right )} = \frac{z^{6}}{720} - \frac{z^{5}}{120} + \frac{z^{4}}{48} - \frac{z^{3}}{36} + \frac{z^{2}}{48} - \frac{z}{120} + \frac{1}{720}\\ \Phi_{ 1, 8 }{\left (z \right )} = \frac{z^{7}}{5040} - \frac{z^{6}}{720} + \frac{z^{5}}{240} - \frac{z^{4}}{144} + \frac{z^{3}}{144} - \frac{z^{2}}{240} + \frac{z}{720} - \frac{1}{5040}\\ %\Phi_{ 1, 9 }{\left (z \right )} = \frac{z^{8}}{40320} - \frac{z^{7}}{5040} + \frac{z^{6}}{1440} - \frac{z^{5}}{720} + \frac{z^{4}}{576} - \frac{z^{3}}{720} + \frac{z^{2}}{1440} - \frac{z}{5040} + \frac{1}{40320}\\ %\Phi_{ 1, 10 }{\left (z \right )} = \frac{z^{9}}{362880} - \frac{z^{8}}{40320} + \frac{z^{7}}{10080} - \frac{z^{6}}{4320} + \frac{z^{5}}{2880} - \frac{z^{4}}{2880} + \frac{z^{3}}{4320} - \frac{z^{2}}{10080} + \frac{z}{40320} - \frac{1}{362880}\\ \end{array} \end{displaymath} } \end{frame} \begin{frame}{$f(z)=\frac{1}{z}$} The general form of $j$th derivative of function $f$ is $$\frac{\partial^{(j)}{f}(z)}{\partial{z}} = \frac{(-1)^{j}j!}{z^{j+1}}$$ therefore \begin{displaymath} g(z) = \sum_{j=1}^{m}{\sum_{k=0}^{j-1}{{{j-1}\choose{k}}(-z)^{k}}} = \sum_{k=0}^{m-1}{{{m}\choose{k+1}}(-z)^{k}} \end{displaymath} \vfill Polynomial $g$ where $m=8$ is defined according to \[g{\left (z \right )} = - z^{7} + 8 z^{6} - 28 z^{5} + 56 z^{4} - 70 z^{3} + 56 z^{2} - 28 z + 8\] \end{frame} \begin{frame}{$\mathcal{P}^{-1}$ and $\mathcal{C}^{-1}$} {\footnotesize \begin{displaymath} g(\mathcal{P})=\mathcal{P}^{-1}= \left[\begin{matrix}1 & & & & & & & \\-1 & 1 & & & & & & \\1 & -2 & 1 & & & & & \\-1 & 3 & -3 & 1 & & & & \\1 & -4 & 6 & -4 & 1 & & & \\-1 & 5 & -10 & 10 & -5 & 1 & & \\1 & -6 & 15 & -20 & 15 & -6 & 1 & \\-1 & 7 & -21 & 35 & -35 & 21 & -7 & 1\end{matrix}\right] \end{displaymath} \begin{displaymath} g(\mathcal{C})=\mathcal{C}^{-1}=\left[\begin{matrix}1 & & & & & & & \\-1 & 1 & & & & & & \\ & -2 & 1 & & & & & \\ & 1 & -3 & 1 & & & & \\ & & 3 & -4 & 1 & & & \\ & & -1 & 6 & -5 & 1 & & \\ & & & -4 & 1 & -6 & 1 & \\ & & & 1 & -1 & 15 & -7 & 1\end{matrix}\right] \end{displaymath} } \end{frame} \begin{frame}{Fibonacci numbers} \begin{displaymath} \mathcal{F} = \left[\begin{matrix}1 & 1\\1 & 0\end{matrix}\right], \quad \lambda_{1} = \frac{1}{2}- \frac{\sqrt{5}}{2} , \quad \lambda_{2} = \frac{1}{2} + \frac{\sqrt{5}}{2} \end{displaymath} \vfill \begin{displaymath} \Phi_{ 1, 1 }{\left (z \right )} = 
\frac{z}{\lambda_{1} - \lambda_{2}} - \frac{\lambda_{2}}{\lambda_{1} - \lambda_{2}}, \quad \Phi_{ 2, 1 }{\left (z \right )} = - \frac{z}{\lambda_{1} - \lambda_{2}} + \frac{\lambda_{1}}{\lambda_{1} - \lambda_{2}} \end{displaymath} \vfill \begin{displaymath} g{\left (z \right )} = z \left(\frac{\lambda_{1}^{r}}{\lambda_{1} - \lambda_{2}} - \frac{\lambda_{2}^{r}}{\lambda_{1} - \lambda_{2}}\right) + \frac{\lambda_{1} \lambda_{2}^{r}}{\lambda_{1} - \lambda_{2}} - \frac{\lambda_{1}^{r} \lambda_{2}}{\lambda_{1} - \lambda_{2}} \end{displaymath} \vfill \begin{displaymath} \mathcal{F}^{r}=\left[\begin{matrix}\frac{1}{\lambda_{1} - \lambda_{2}} \left(\lambda_{1} \lambda_{2}^{r} - \lambda_{1}^{r} \lambda_{2} + \lambda_{1}^{r} - \lambda_{2}^{r}\right) & \frac{\lambda_{1}^{r} - \lambda_{2}^{r}}{\lambda_{1} - \lambda_{2}}\\\frac{\lambda_{1}^{r} - \lambda_{2}^{r}}{\lambda_{1} - \lambda_{2}} & \frac{\lambda_{1} \lambda_{2}^{r} - \lambda_{1}^{r} \lambda_{2}}{\lambda_{1} - \lambda_{2}}\end{matrix}\right] \end{displaymath} \vfill \begin{displaymath} \mathcal{F}^{8} = \left[\begin{matrix}f_{9} & f_{8}\\f_{8} & f_{7}\end{matrix}\right] = \left[\begin{matrix}34 & 21\\21 & 13\end{matrix}\right] \end{displaymath} \end{frame} \begin{frame}{ } \begin{CJK}{UTF8}{mj} \Huge 고맙습니다 \end{CJK} \end{frame} \end{document}
{ "alphanum_fraction": 0.5968630019, "avg_line_length": 42.6383248731, "ext": "tex", "hexsha": "99035b8175255478b4626fa733562b6bf444b780", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "ec20ba27d15006ba5b3dbcf0a38f20231238e9ae", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "massimo-nocentini/massimo-nocentini.github.io", "max_forks_repo_path": "PhD/second-year-summary/theo-stuff/agf-fm.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "ec20ba27d15006ba5b3dbcf0a38f20231238e9ae", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "massimo-nocentini/massimo-nocentini.github.io", "max_issues_repo_path": "PhD/second-year-summary/theo-stuff/agf-fm.tex", "max_line_length": 824, "max_stars_count": null, "max_stars_repo_head_hexsha": "ec20ba27d15006ba5b3dbcf0a38f20231238e9ae", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "massimo-nocentini/massimo-nocentini.github.io", "max_stars_repo_path": "PhD/second-year-summary/theo-stuff/agf-fm.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 14737, "size": 33599 }
\documentclass[12pt]{article} \usepackage[margin=1in]{geometry} \usepackage{setspace} \usepackage{graphicx} \usepackage{subcaption} \usepackage{amsmath} \usepackage{color} \usepackage{hyperref} \usepackage{multicol} \usepackage{framed} \usepackage{xcolor} \usepackage{wrapfig} \usepackage{float} \usepackage{fancyhdr} \usepackage{verbatim} \usepackage{textcomp} \pagestyle{fancy} \lfoot{\textbf{Open Source Rover Mechanical Assembly Manual}} \rfoot{Page \thepage} \lhead{\textbf{\leftmark}} \rhead{\textbf{\rightmark}} \cfoot{} \renewcommand{\footrulewidth}{1.8pt} \renewcommand{\headrulewidth}{1.8pt} \doublespacing \setlength{\parindent}{1cm} \begin{document} \title{Open Source Rover: Body Assembly Instructions} \author{Authors: Michael Cox, Eric Junkins, Olivia Lofaro} \makeatletter \def\@maketitle{ \begin{center} \makebox[\textwidth][c]{ \includegraphics[width=0.7\paperwidth]{"Pictures/Body/Body title".png}} {\Huge \bfseries \sffamily \@title }\\[3ex] {\Large \sffamily \@author}\\[3ex] \includegraphics[width=.65\linewidth]{"Pictures/Misc/JPL logo".png} \end{center}} \makeatother \maketitle \noindent {\footnotesize Reference herein to any specific commercial product, process, or service by trade name, trademark, manufacturer, or otherwise, does not constitute or imply its endorsement by the United States Government or the Jet Propulsion Laboratory, California Institute of Technology. \textcopyright 2018 California Institute of Technology. Government sponsorship acknowledged.} % Introduction \newpage \tableofcontents \newpage \section{Machining/Fabrication} \subsection{Cut the Front Aluminum Plate} For the large aluminum plates that make up the main body of our rover, we will need two 9x12 inch plates (top and bottom), two 12x4.5 inch plates (left and right sides), and one 9x4.5 inch plate (for the front; the back panel will be made of laser cut acrylic and described later). However, the aluminum plates on our parts list only come in the 9x12 inch and 4.5x12 inch sizes. We will therefore need to custom cut the front 9x4.5 inch panel. Cut one panel to the dimensions given in Figure \ref{fb panel cut}. %\begin{figure}[H] % \centering % \begin{minipage}[b]{0.45\textwidth} % \includegraphics[width=\textwidth]{"Pictures/Fabrication/Front panel cut".PNG} % \end{minipage} % \hfill % \begin{minipage}[b]{0.45\textwidth} % \includegraphics[width=\textwidth]{"Pictures/Fabrication/Back panel cut".png} % \end{minipage} % \caption{Cutting the front and back panels of the body} % \label{fb panel cut} %\end{figure} \begin{figure}[H] \centering \includegraphics[width=0.7\textwidth]{"Pictures/Fabrication/Front panel cut".PNG} \caption{Cutting the front panel of the body} \label{fb panel cut} \end{figure} \subsection{Laser Cut Parts} In order to put the electronics inside the robot body we need an electronics board. Additionally, the back panel of the rover requires a couple custom cutouts accessing components like our voltage monitor and the USB ports on the Raspberry Pi. One simple and inexpensive solution for these parts is to order pieces of laser cut acrylic. In the Body Assembly folder in the repository, there is a folder called Laser Cut Parts. That folder contains two .DXF files which are 2D path files for a laser cutter. If you have a laser cutter, you may cut these parts yourselves. Also, there are many inexpensive laser cutting website services. 
An example of one of these sites is: \begin{itemize} \item \href{https://www.sculpteo.com}{https://www.sculpteo.com} \end{itemize} To get the above parts from Sculpteo, go to Laser cutting and then upload these files (with mm selected as units). Hit Next. Make sure scale is set to 100\%, change the material to Acrylic, have thickness to 1/8, and then select whatever color you wish. \subsection{9x12 Aluminum Plate Drilling} Next we need to drill a hole in one of the 9x12 Aluminum plates \textbf{S35} because we will need a hole of just over 0.5 in diameter for the differential pivot mount. There is already a small hole drilled in the location we want to use, but it needs to be widened substantially. Start with the drill \# 23 and drill the hole shown by Figure \ref{Drilling the Al plate}. Repeat this with drill sizes stepping up until you get to a drill of 0.5 in. Take the 0.5 in hollow rod \textbf{S19} and make sure it spins freely in the hole you have created. If it does not, drill the hole slightly larger or sand/file the hole until the rod spins with no resistance.\footnote{The 0.5 in hollow rod must spin \textit{freely} while mounted inside the bearing blocks (See step 2.2 Differential pivot for example). It may help to follow step 2.2 in this document to test if you have enough clearance.} \begin{figure}[H] \centering \begin{minipage}[b]{0.45\textwidth} \includegraphics[width=\textwidth]{"Pictures/Fabrication/9x12 Plate cut".PNG} \end{minipage} \hfill \begin{minipage}[b]{0.45\textwidth} \includegraphics[width=\textwidth]{"Pictures/Fabrication/9x12 Plate cut2".png} \end{minipage} \caption{Drilling the Aluminum Plate} \label{Drilling the Al plate} \end{figure} \newpage \section{Mechanical/Structural Assembly} \subsection{Chassis} \begin{figure}[H] \centering \includegraphics[width=1\textwidth]{"Pictures/Body/Chassis Parts".PNG} \end{figure} \begin{enumerate} \item \textbf{Attach the channels to the Top panel: } Take the modified 9x12 Aluminum plate \textbf{S35A} and attach the four 1.5inch channel connectors \textbf{S1} using screws \textbf{B2} and hex nuts \textbf {B11} at each of the corners as shown in Figure \ref{channel to al plate}. Make sure to use the inner circle for these screws and not the outer ones where there won't be enough clearance for the hex nut. \begin{figure}[H] \centering \begin{minipage}[b]{0.20\textwidth} \includegraphics[width=\textwidth]{"Pictures/Body/Step 1".PNG} \end{minipage} \hfill \begin{minipage}[b]{0.30\textwidth} \includegraphics[width=\textwidth]{"Pictures/Body/Step 1 b".PNG} \end{minipage} \hfill \begin{minipage}[b]{0.40\textwidth} \includegraphics[width=\textwidth]{"Pictures/Body/Step 2".PNG} \end{minipage} \caption{Attaching channels to aluminum plate} \label{channel to al plate} \end{figure} \item \textbf{Attach the side panels: } Attach the 4.5x12 plates \textbf{S37} to the channels using screws \textbf{B2} and hex nuts \textbf{B11}, again using the middle circle of holes for the screws and hex nuts. 
\begin{figure}[H] \centering \begin{minipage}[b]{0.45\textwidth} \includegraphics[width=\textwidth]{"Pictures/Body/Step 3 a".PNG} \end{minipage} \hfill \begin{minipage}[b]{0.45\textwidth} \includegraphics[width=\textwidth]{"Pictures/Body/Step 3 b".PNG} \end{minipage} \caption{Attach the side panels} \label{Body side panels} \end{figure} \item \textbf{Attach the PVC clamping hub:} Attach the 1-inch PVC bore clamping hub \textbf{S24} to the top plate of the body using screws \textbf{B1} wherever you would like your rover's "neck" to protrude from the body. We suggest using the location shown in Figure \ref{pvc to top plate}. \begin{figure}[H] \centering \begin{minipage}[b]{0.40\textwidth} \includegraphics[width=\textwidth]{"Pictures/Body/Step 11a".PNG} \end{minipage} \hfill \begin{minipage}[b]{0.50\textwidth} \includegraphics[width=\textwidth]{"Pictures/Body/Step 11b".PNG} \end{minipage} \caption{Attach the PVC clamp to top plate} \label{pvc to top plate} \end{figure} \end{enumerate} \subsection{Differential Pivot Block} The differential pivot is used to transfer weight off of the wheel that is currently climbing to the other front wheel, allowing the rover to climb more easily. Additionally, it serves as a second contact point for the rover's body such that it does not rotate freely about the cross rod. \begin{figure}[H] \centering \includegraphics[width=1\textwidth]{"Pictures/Body/Differential Parts".PNG} \end{figure} \begin{enumerate} \item \textbf{Mount the pillow bearing blocks:} Using spacers \textbf{T1}, screws \textbf{B6}, and hex nut \textbf{B11}, mount the pillow blocks \textbf{S11} to the top of the body over the hole in the aluminum plate that you drilled earlier as shown in Figure \ref{mount pillow blocks}. \begin{figure}[H] \centering \begin{minipage}[b]{0.30\textwidth} \includegraphics[width=\textwidth]{"Pictures/Body/Step 5 a".PNG} \end{minipage} \hfill \begin{minipage}[b]{0.55\textwidth} \includegraphics[width=\textwidth]{"Pictures/Body/Step 5 b".PNG} \end{minipage} \caption{Mounting the pillow blocks} \label{mount pillow blocks} \end{figure} \end{enumerate} \subsection{Electronics Board} Next up is preparing the electronics plate. This plate holds all the electrical components, including the Raspberry Pi, all 5 RoboClaw Motor controllers, and the voltage regulator. \begin{figure}[H] \centering \includegraphics[width=1\textwidth]{"Pictures/Body/Electronics Board Parts".PNG} \end{figure} \begin{enumerate} \item \textbf{Attaching the Standoffs} There are a few different standoffs here. By using different standoff heights,we gain access to the micro USB port on each of the individual RoboClaws. The Raspberry Pi also has its own metric standoffs. In Figure \ref{standoffs}, the colors correspond to the following parts: \textcolor{green}{Green}:\textbf{T4}, \textcolor{blue}{Blue}:\textbf{T5}, \textcolor{pink}{Pink}:\textbf{T6}, \textcolor{cyan}{Cyan}:\textbf{T7}, \textcolor{yellow}{Yellow}:\textbf{T8}. Use the screw that corresponds to the spacer or standoff used. 
\begin{figure}[H] \centering \begin{minipage}[b]{0.50\textwidth} \includegraphics[width=\textwidth]{"Pictures/Body/Step 9a".PNG} \end{minipage} \hfill \begin{minipage}[b]{0.35\textwidth} \includegraphics[width=\textwidth]{"Pictures/Body/Electronics Screw".PNG} \end{minipage} \caption{Electronics Board Step 1} \label{standoffs} \end{figure} \item \textbf{Mounting the Electronics:} Take the Raspberry Pi \textbf{E1}, RoboClaws \textbf{E2}, and voltage regulator \textbf{E4} and mount them in the locations shown in Figure \ref{electronics board 2}, again using the screws \textbf{B8 and B10} corresponding with each standoff. \begin{figure}[H] \centering \includegraphics[width=.65\linewidth]{"Pictures/Body/Step 9c".PNG} \caption{Electronics Board Step 2} \label{electronics board 2} \end{figure} \item \textbf{Mounting Electronics into Chassis:} Now that the electronics are on the plate, we can mount it into the chassis. Using screws \textbf{B3}, washers \textbf{W1} (3 washers per corner), and hex nuts \textbf{B11} attach the electronics board to the chassis at all four corners.\footnote{The washers give a small amount of extra space that is needed to fit the Voltage monitor in the system later.} \begin{figure}[H] \centering \begin{minipage}[b]{0.40\textwidth} \includegraphics[width=\textwidth]{"Pictures/Body/Step 8b".PNG} \end{minipage} \hfill \begin{minipage}[b]{0.40\textwidth} \includegraphics[width=\textwidth]{"Pictures/Body/Step 8a".PNG} \end{minipage} \caption{Electronics Board Step 3} \end{figure} \end{enumerate} Figure \ref{vm} shows how we mounted the volt meter as an example of how the pieces should fit together. If the volt meter does not fit in this gap, you can add additional washers from the previous step to space the electronics board farther from the top plate. \begin{figure}[H] \centering \includegraphics[width=0.6\textwidth]{"Pictures/Body/voltmeter".PNG} \caption{Volt meter and connectors mounted} \label{vm} \end{figure} \subsection{Closing the Body} \begin{enumerate} \begin{figure}[H] \centering \includegraphics[width=1\textwidth]{"Pictures/Body/Closing Body".PNG} \end{figure} \item \textbf{Attach the Dual Side Mounts:} Mount Dual Side Mounts A \textbf{S17} using screws \textbf{B1} in the locations shown in Figure \ref{Dual Side Mounts}. \begin{figure}[H] \centering \begin{minipage}[b]{0.40\textwidth} \includegraphics[width=\textwidth]{"Pictures/Body/Dual Side mounts".PNG} \end{minipage} \hfill \begin{minipage}[b]{0.40\textwidth} \includegraphics[width=\textwidth]{"Pictures/Body/Dual side mounts 2".PNG} \end{minipage} \caption{Dual Side Mount A locations} \label{Dual Side Mounts} \end{figure} \item \textbf{Attach the front/back panel: } Attach the Acrylic back panel \textbf{S37B} to the "back" of the body using screws \textbf{B2} (the "back" of the rover will be the side with the Raspberry Pi). The cutout should line up with the USB ports on the Pi. Repeat this with the aluminum plate for the front of the body with \textbf{S37A}. \begin{figure}[H] \centering \begin{minipage}[b]{0.40\textwidth} \includegraphics[width=\textwidth]{"Pictures/Body/Back panel 1".PNG} \end{minipage} \hfill \begin{minipage}[b]{0.40\textwidth} \includegraphics[width=\textwidth]{"Pictures/Body/Back panel 2".PNG} \end{minipage} \caption{Mounting the front/back panels} \label{front/back panels} \end{figure} \item \textbf{Attach the bottom panel: } Attach the 9x12 Aluminum Plate \textbf{S35} to close the bottom of the body using screws \textbf{B1}. 
At this point the body should be complete with the differential pivot mount, electronics, and chassis and should look similar to Figure \ref{finished body}. \begin{figure}[H] \centering \begin{minipage}[b]{0.45\textwidth} \includegraphics[width=\textwidth]{"Pictures/Body/Finished Body 1".PNG} \end{minipage} \hfill \begin{minipage}[b]{0.45\textwidth} \includegraphics[width=\textwidth]{"Pictures/Body/Finished Body 2".PNG} \end{minipage} \caption{Finished Body Assembly} \label{finished body} \end{figure} \end{enumerate} \end{document}
{ "alphanum_fraction": 0.7500360386, "avg_line_length": 43.35625, "ext": "tex", "hexsha": "d91a4afece12b87d1da5a5a35efea99f04c55150", "lang": "TeX", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2019-08-01T13:56:34.000Z", "max_forks_repo_forks_event_min_datetime": "2019-07-28T14:52:20.000Z", "max_forks_repo_head_hexsha": "9eb6c10de98a5a2f274035bf8eacf69d7b549cc9", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "crypticterminal/open-source-rover", "max_forks_repo_path": "Mechanical/Body Assembly/Latex Doc/Body Build Doc.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "9eb6c10de98a5a2f274035bf8eacf69d7b549cc9", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "crypticterminal/open-source-rover", "max_issues_repo_path": "Mechanical/Body Assembly/Latex Doc/Body Build Doc.tex", "max_line_length": 888, "max_stars_count": 2, "max_stars_repo_head_hexsha": "9eb6c10de98a5a2f274035bf8eacf69d7b549cc9", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "cnheider/open-source-rover", "max_stars_repo_path": "Mechanical/Body Assembly/Latex Doc/Body Build Doc.tex", "max_stars_repo_stars_event_max_datetime": "2019-06-27T11:59:50.000Z", "max_stars_repo_stars_event_min_datetime": "2018-08-08T03:06:17.000Z", "num_tokens": 4144, "size": 13874 }
\documentclass[a4paper, 12pt]{article}
\usepackage{graphicx}
\usepackage{epsfig}
\usepackage{epstopdf}
\usepackage{caption}
\usepackage{float}
\graphicspath{{/Users/hyowonshin/Library/texmf/}}
\captionsetup[figure]{font=small, labelfont=small}
\begin{document}
\thispagestyle{plain}
\begin{center}
\large WORKING PAPER \vspace{0.8cm}
\Large \textbf{Outgroup Trust and Ethnic Voting in New Democracies: Evidence from Sub-Saharan Africa\footnote{This study has been pre-registered on the OSF website on March 26, 2021. Please refer to this link: https://osf.io/4pn6e.}} \vspace{0.4cm}
\large Hyo-Won Shin\footnote{PhD candidate at the University of Illinois, Urbana-Champaign. [email protected].} \vspace{1cm}
\large This version: May 24, 2021 \vspace{1cm}
\textbf{Abstract}
\end{center}
This study examines the relationship between out-group trust and ethnic voting across new democracies in Sub-Saharan Africa. I propose two mechanisms through which out-group trust influences voting behavior in ethnically salient contexts. The information receptivity mechanism hypothesizes that voters with high levels of out-group trust and greater access to information on candidates are less likely to vote for a co-ethnic candidate. The collective action mechanism proposes that individuals with high levels of out-group trust and high levels of information on the voting intentions of co-ethnic and non-co-ethnic members are less likely to vote for a co-ethnic candidate. I test the relationship between out-group trust and ethnic voting using the Wave 3 Afrobarometer survey data for 10 Sub-Saharan new democracies. Results derived from a multilevel model show support for the hypothesis that individuals with high levels of out-group trust are less likely to vote for a co-ethnic candidate. Furthermore, results show mixed outcomes for the information receptivity mechanism: individuals with high levels of information and high levels of out-group trust are no more likely to vote for a co-ethnic than those with high levels of information and low levels of out-group trust. Only those with a middle level of information and high levels of out-group trust are less likely to vote for a co-ethnic than those with a middle level of information and low levels of out-group trust. I find no support for the collective action mechanism.\\
\pagebreak
\section{Introduction}
For many years, scholars have argued that divisions along ethnic lines may be detrimental to the consolidation of democracy\cite{dahlPolyarchyParticipationOpposition1973, horowitzEthnicGroupsConflict1985, lijphartDemocracyPluralSocieties1977, rabushkaPoliticsPluralSocieties1972}. In societies where people identify strongly with their ethnicity, political outcomes such as voting\cite{adidaAfricanVotersFavor2015, barretoISiSePuede2007}, redistribution\cite{houleInequalityEthnicDiversity2017}, and conflict\cite{caselliTheoryEthnicConflict2013, kingDiversityViolenceRecognition2020} also tend to be divided along ethnic lines. A strong association between political outcomes and ethnic identity can be harmful to democratic consolidation as it may undermine democratic accountability, political stability, and social harmony.
\paragraph{}
Voting, a key feature of democracy, has been found to hinder democratic consolidation when done along ethnic lines\cite{houleDoesEthnicVoting2018}.
Ethnic voting is harmful to democracy as it 1) reduces ex ante uncertainty of voting, 2) encourages patronage politics, and 3) pushes candidates to take extreme policy stances leading to polarization. As a result, countries that vote along ethnic lines may appear to be moving towards democracy, as voting is deemed the essence of democracy, but in actuality, they may be experiencing political patterns that in fact are preventing democratic consolidation. \paragraph{} If ethnic voting is pernicious, what might encourage individuals to vote across ethnic lines? According to the social capital literature, social trust is essential in building a robust democracy as it has been known to decrease discrimination and increase willingness to cooperate with others at the individual level, and improve collective action, economic growth and institutions at the national level\cite{uslanerMoralFoundationsTrust2002, bigelowDemocracyAmericaVolume1899, inglehartTrustWellbeingDemocracy1999, putnamWhatMakesDemocracy1993}. Social trust’s known ability to bridge individuals and groups, and thus promote democracy brings me to my research question; can an increase in trust across ethnic groups affect individuals’ voting behavior in contexts where ethnicity is salient? \paragraph{} I argue that the detrimental effect of ethnic diversity on democratic consolidation will be less prominent in contexts where individuals extend trust beyond their own ethnic groups (i.e., display high out-group trust). In particular, I look at the relationship between the radius of trust (i.e. the level of in- and out-group trust) and the extent of ethnic voting\cite{houleDoesEthnicVoting2018}. Since trust and voting behavior vary from individual to individual, I study this question at the individual level. The mechanisms through which the radius of trust determines the extent of ethnic voting are 1) a voter’s propensity to credit or discredit positive information on non-co-ethnic candidates or parties (information receptivity mechanism), and 2) a voter’s expectation of both co-ethnic and non-co-ethnic voter’s voting behavior (collective action mechanism). \paragraph{} This study on the role of out-group trust on democratic consolidation in ethnically diverse and salient settings speaks to a number of literatures. First, this study can add to the ethnic voting literature, particularly to the discussion on the conditions under which ethnicity is a significant predictor for vote choice\cite{chandraWhyEthnicParties2004, conroy-krutzInformationEthnicPolitics2013, posnerPoliticalSalienceCultural2004, dunningCrosscuttingCleavagesEthnic2010}. Second, it can contribute to the on-going debate in the social capital literature on whether and how social trust contributes to democratic development\cite{almondCivicCulturePolitical1989, inglehartRenaissancePoliticalCulture1988, mullerCivicCultureDemocracy1994a, inglehartPoliticalCultureDemocracy2003, rafaellaportaTrustLargeOrganizations1997, putnamWhatMakesDemocracy1993, riceSocialCapitalGovernment2001, knackSocialCapitalQuality2002a, uslanerMoralFoundationsTrust2002, backWhenTrustMatters2016, crepazWhatTrustGot2017a}. By considering the role of social trust in the relationship between ethnic identity and vote choice, we can not only learn about the extent to which ethnicity becomes the prime heuristic for people’s vote choice, but also whether social trust is a significant predictor of voting behavior in ethnically salient contexts. \paragraph{} The paper will proceed as follows. 
First, I explain why ethnic voting is important to consider when studying democratic consolidation and how it is detrimental to its progress. This is followed by a brief literature review on possible solutions for the negative consequences of ethnic diversity, including increasing social trust across ethnic lines. Then I describe explanations for why people vote along ethnic lines and discuss how out-group trust could possibly deter individuals from voting along ethnic lines. The following section lays out my proposed mechanisms for how out-group trust could lower the likelihood of individuals voting along ethnic lines. These are 1) the information receptivity mechanism, and 2) the collective action mechanism. This is then followed by the empirical analysis and results.
\section{Literature Review}
\subsection{Ethnic Voting and Democratic Consolidation}
A democracy is consolidated, according to Linz and Stepan\cite{linzBreakdownDemocraticRegimes1978}, when democracy itself is “the only game in town.” When change is made, it is made through the democratic processes institutionalized in that country, rather than through authoritarian measures. Diamond\cite{diamondDemocraticConsolidation1994} says that democracy is consolidated when it “becomes so broadly and profoundly legitimate among its citizens that it is very unlikely to break down.” A single person or party seizing power is unlikely, because the norms of democracy have become engrained in the system. A key component of both these characterizations of consolidation is elections. While consolidation also includes rule of law, an independent judiciary, and a robust civil society, competitive elections are the base upon which these other factors build\cite{linzConsolidatedDemocracies1996}.
\paragraph{}
Competitive elections guarantee a continuation of democracy because the contestation between candidates or parties prevents a single authority from staying in power indefinitely\cite{przeworskiDemocracyDevelopmentPolitical2000}. Ethnic voting, or voting using ethnic cues to decide whom to vote for, on the other hand, can be detrimental to democratic consolidation as it can undermine the competitive electoral process. According to Houle\cite{houleDoesEthnicVoting2018}, ethnic voting poses a danger to democratic consolidation for three reasons: ethnic voting 1) reduces ex ante uncertainty of voting, which is a fundamental characteristic of democracy\cite{przeworskiDemocracyDevelopmentPolitical2000}, 2) encourages patronage politics\cite{chandraWhyEthnicParties2004}, and 3) pushes candidates to take extreme policy stances leading to polarization\cite{horowitzEthnicGroupsConflict1985, rabushkaPoliticsPluralSocieties1972, chandraWhyEthnicParties2004, houleDoesEthnicVoting2018}.
\paragraph{}
The first point refers to Przeworski’s definition of democracy as a “system in which incumbents lose elections and leave office when the rules so dictate”\cite{przeworskiDemocracyDevelopmentPolitical2000} (54). The key characteristic of democracy, he argues, is contestation in the form of elections. For elections to be considered legitimate, they must fulfill three criteria: 1) ex-ante uncertainty (anyone can win), 2) ex-post irreversibility (losers do not try to reverse results), and 3) repeatability\cite{przeworskiDemocracyDevelopmentPolitical2000} (16). Ethnic voting makes it highly likely that the first criterion, ex ante uncertainty, will be violated.
When politics are divided along ethnic lines, politicians are likely to appeal to their co-ethnic voters and those voters are more likely to vote for them. Since ethnicity is a sticky trait, voting along ethnic lines make the electoral outcomes more predictable. As ethnicity becomes more important to the voters, the demographics of the country will pre-determine who the winner and loser will be. An example of a country in which ethnicized politics has led to long term rule for particular parties is Kenya. Here, politics have always been dominated by parties led by Kikuyus , the biggest ethnic group in Kenya. On the other hand, in places where ethnicity is not the key factor for vote choice, the electoral outcomes will be more difficult to determine as voters may be more likely to switch parties based on their policies and past performances. \paragraph{} Decreased unpredictability of electoral outcomes is bad for democracy as it undermines the legitimacy of the institution, which then discourages electoral losers from participating in future elections and having trust in their outcomes. Since they are likely to find the electoral results untrustworthy, they will have little interest in supporting the regime. Rather, they may have incentive to undermine democracy by staging a coup and installing a government led by their ethnic group or not partaking in electoral processes that further decreases their legitimacy. Groups in power, on the other hand, may try to consolidate their power by weakening the rule of law, taking away minority rights, or even by staging self-coups. All of these efforts from either side can lead to the fall of democracy. \paragraph{} A second mechanism through which ethnic voting can erode democracy is by encouraging patronage politics. Patronage politics refers to a spoils system in which electoral winners exchange favors for votes. In places where votes are based on the candidate’s ethnicity, incumbents are less interested in the well-being of their citizens as a whole and more focused on pleasing their co-ethnic constituents. As a result, the incumbent is less likely to distribute public goods that benefit the country as a whole and more likely to give up patronage goods (e.g., provide public sector jobs) to their supporters. On the other hand, countries that do not vote along ethnic lines are more likely to eschew patronage politics and instead incumbents are likelier to appeal to all voters by providing public goods to the whole population.\cite{chandraWhyEthnicParties2004} \paragraph{} Patronage politics excludes electoral losers from accessing state resources, which in turn harms their well-being. Being excluded from accessing well-paid jobs can directly harm their socio-economic status, which in turn increases the economic inequality between the electoral winners and losers. The inequality then becomes a source of grievance leading to conflict that erodes and destabilizes democracy\cite{houleDoesEthnicVoting2018}. \paragraph{} Lastly, ethnic voting can harm democracy via ethnic out-bidding and resulting polarization. Ethnic out-bidding refers to the process where elites within the same group compete for votes by taking on a more extreme position than the other. When voting is primarily based on ethnicity, appealing to non-co-ethnic voters becomes unnecessary. As a result, candidates become more and more polarized in their stance as they try to outbid their competing co-ethnic candidate. 
The radicalized policies and rhetoric drive ethnic and co-ethnic groups further apart from one another, which can then lead to an emergence of “pernicious polarization”, a phenomenon where a society splits into mutually distrustful “us” versus “them” camps\cite{mccoyPolarizationGlobalCrisis2018}. In an extremely polarized environment, politicians are motivated to appeal to voters by proposing extreme policies, which favor co-ethnics and discriminate against non-co-ethnics. Voters, on the other hand, are influenced to loath, fear and distrust non-co-ethnics, which can in worst case scenarios lead to civil unrest and conflict\cite{bhavnaniEthnicPolarizationEthnic2009, devottaEthnicOutbiddingEthnic2005}. \paragraph{} Polarization along ethnic lines, according to McCoy and Somer\cite{mccoyTheoryPerniciousPolarization2019}, is especially detrimental to democracy as compared to cleavages based on issues or values. This is because cleavages formed around identity and belonging raises the “question (of) who has the right to live in a polity as a full citizen and whether one group can claim exclusive legitimacy to represent the citizens in the government”\cite{mccoyTheoryPerniciousPolarization2019} (263-64). Since these issues question the very existence of individuals, decisions derived from ethnic politics will directly affect the daily lives of the people. As electoral losers seek to regain power, they may work against the norms of competitive elections. Winners may also work against democratic norms in their attempts to hold onto power\cite{mckennaAreDiverseSocieties2018}. With violation of democratic norms from both sides, the country faces the danger of democratic backsliding. Such was the case in Kenya during the 2007 elections, where the incumbent’s alleged electoral manipulation led to the outbreak of ethnic violence targeting the incumbent’s ethnic group. \paragraph{} Empirical studies support the theorized detrimental effect of ethnic voting on democracy. Results from Houle’s\cite{houleDoesEthnicVoting2018} study on ethnic voting and democracy across 58 democracies, as shown in Table 1, indicate a negative relationship between ethnic voting and democracy. To measure ethnic voting, Houle calculated the degree to which people of a given group vote for different parties than other groups of the same country. The score ranges from 0 to 1, where 0 indicates that members of group i vote in exactly the same way as other groups from the same country and 1 where members of group i vote strictly along ethnic lines. For levels of democracy, he used both the Polity score and the Freedom House score. The scores are based on a number of criteria including presence of a competitive election. The score for Polity ranges from -10 to 10, while the latter from one to seven, where higher scores indicate higher democracy or greater democratic consolidation. If countries score high on these measures, they are more likely to be democratically consolidated, including holding competitive elections. Using these measures, Houle finds ethnic voting is significantly correlated with a reduction in the quality of democracy. A country with a Polity score of 6.0, for example, would have a Polity score of 6.35 if its ethnic voting level (GVF) was at the 5th percentile of the distribution. On the other hand, if the same country’s ethnic level were at the 95th percentile of the distribution, its Polity score would be 5.93. 
\subsection{Social Trust and Political Participation} Given that ethnic voting appears to be detrimental to democratic consolidation via its impact on voting behavior, how can we discourage voting along ethnic lines? Social capital scholarship provides insight into how social trust can encourage people vote across ethnic lines. \paragraph{} According to the social capital literature, social trust is a key foundation for democratic consolidation. Social trust is defined as a general disposition to what extent one trusts strangers or unfamiliar others\cite{uslanerSegregationMistrustDiversity2012, uslanerMoralFoundationsTrust2002}. It has been argued that social trust makes associations easier to create as it cuts down transaction costs related to formal coordination mechanisms like contracts, hierarchies, bureaucratic rules, and others\cite{fukuyamaTrustSocialVirtues2000, putnamBowlingAloneAmerica2000, uslanerMoralFoundationsTrust2002, warrenTrustDemocracy2018}. Elections is one area that can benefit from associations fostered by social trust. According to Keefer, Scartascini, and Vlaicu\cite{keeferSocialTrustElectoral2019}, low voter trust in each other is a fundamental concern when it comes to the quality of government. They argue that if voters do not trust that their fellow voters to act with them to hold politicians accountable, politicians will have less of a reason to fear the electoral consequences of breaking their promises\cite{keeferSocialTrustElectoral2019} (2). Using Latin American data, they find a strong correlation between low trust and preferences for policies associated with low quality and populist governments. As such evidence shows, social trust is closely associated with democratic consolidation as it has the ability to encourage collective action in keeping politicians accountable. \paragraph{} Empirical studies provide support for association between social trust and various aspects of democracy. At the country-level, studies find that social trust, usually measured as the percentage of respondents who agreed to the statement ‘most people can be trusted’, is a significant predictor for stable democracy, levels of democracy, and years of continuous democracy\cite{inglehartModernizationPostmodernizationCultural1997, inglehartTrustWellbeingDemocracy1999, mullerCivicCultureDemocracy1994b}. At the macro-level (e.g., national and community), studies find similar results where social trust is positively correlated with economic growth\cite{fukuyamaTrustSocialVirtues1995, knackDoesSocialCapital1997, dasguptaSocialCapitalMultifaceted2000}, lower crime rates\cite{jacobsDeathLifeGreat1992, wilsonTrulyDisadvantagedInner2012}, more responsive government\cite{putnamWhatMakesDemocracy1993}, and favorable view of the government\cite{riceSocialCapitalGovernment2001, knackSocialCapitalQuality2002, laportaQualityGovernment1999}. At the individual-level, high levels of social trust significantly predicts high levels of confidence in government (Brehm and Rahn 1997) and higher likelihood of protest\cite{bensonInterpersonalTrustMagnitude2004}. \paragraph{} On the other hand, absence of social trust has been found to have a detrimental effect on social and political stability. 
Previous studies on diverse societies, where the level of social trust is generally found to be low\cite{dinesenEthnicDiversitySocial2020}, find a positive association with conflict\cite{varshneyEthnicityEthnicConflict2009}, poor governance\cite{alesinaPublicGoodsEthnic1999}, low social capital\cite{alesinaEthnicDiversityEconomic2005}, and poor economic performance\cite{alesinaEthnicDiversityEconomic2005, easterlyAfricaGrowthTragedy1997}. \paragraph{} In contexts where ethnic identity is salient, social trust towards non-co-ethnics or out-groups matters. When social, political, and economic aspects of life are divided along ethnic lines, non-co-ethnic individuals or groups become potential competitors for resources. Since these non-co-ethnic members or groups are viewed as potential competitors, it is likely that the ability to trust these members and groups would also be low. When trust for non-co-ethnic or out-group members is low, cooperation across groups will be difficult, which then could have a detrimental effect on social and political stability. Studies examining the relationship between out-group trust and democracy find that countries with higher levels of out-group trust are more likely to score higher on the democratic scale. Delhey and co-authors\cite{delheyHowGeneralTrust2011} test the correlation between the radius of trust towards out-group members and democratic awareness and level of democracy across 51 countries using the World Values Survey data. They find a significant and positive association between trust and two measures of democracy. \paragraph{} At the individual-level, Crepaz and co-authors\cite{crepazWhatTrustGot2017} also use World Values Survey data to find that individuals with high levels of out-group trust participate more actively in nonconventional political activity, such as participating in demonstrations, boycotts, and signing a petition. They also find that the presence of out-group trust had a slightly negative impact on voting. They explain that out-group trusters are more likely to engage in unconventional political behavior than conventional ones because they are “other regarding,” altruistic, and extroverted\cite{stollePoliticsSupermarketPolitical2005}. Their motivation for political participation lies not only in self-enrichment but also the pursuit of the common good. Out-group trusters, therefore, are more likely to engage in unconventional political activities that can demand change and solve collective problems. \subsection{Non-Co-Ethnic Trust and Ethnic Voting} While previous studies on out-group trust and democracy show evidence of a positive relationship, scholarship has not addressed how trust across ethnic groups impacts ethnic voting and the mechanisms through which out-group trust influences voting behaviors. First, studies on out-group trust and democracy have aggregated multiple trust measures into a single index. Studies using the World Values Survey data construct an out-group trust index by averaging the level of trust across three groups: people you meet for the first time, people of another religion, and people of another nationality, none of which directly addresses ethnic lines. While the question of religion may capture trust across ethnic groups in some contexts, it may not be the case for countries where ethnicity and religious diversity do not overlap with one another. As a result, the question of trust towards people of another religion would not capture the ethnic tension the country is suffering from. 
By looking specifically at non-co-ethnic trust, one form of out-group trust, I hope to better understand the relationship between out-group trust and democracy. \paragraph{} Second, research on out-group trust and voting in democracies has yet to look at the effect of non-co-ethnic trust on ethnic voting. Previous studies have looked at the relationship between out-group trust and type of political participation individuals engage in, but they did not take into consideration ethnic contexts and how it would alter their voting behaviors. As a result, this paper seeks to understand the effects of trust across ethnic groups on individuals’ motivation to vote along ethnic line. If social trust has the ability to promote democratic behaviors, as per the social capital literature, we should expect to see a decreased motivation to vote along ethnic lines among those with high levels of non-co-ethnic trust. Furthermore, I seek to test the mechanisms through which non-co-ethnic trust has on voting behaviors in ethnically salient contexts. While the literature on social trust and political participation hint at a number of mechanisms through which trust influences voting behavior, it has yet to be explicitly tested for. \section{Theory on Outgroup Trust and Voting Behavior in New Democracies} \subsection{Definitions and Concepts} Trust refers to the belief that “others will not act opportunistically to take advantage of them”\cite{keeferSocialTrustElectoral2019}. Trust, according to the social capital literature, is considered beneficial for societies as it stimulates cooperation between citizens in general, including those that are divided socially and culturally\cite{bigelowDemocracyAmericaVolume1899, uslanerMoralFoundationsTrust2002}. Trust, especially trust extended to “strangers”, enhances feeling of common moral foundations, identity, and norms, all of which motivate people to achieve common goals that contribute to a democratic society\cite{putnamBowlingAloneAmerica2000, oskarssonGeneralizedTrustPolitical2010}. \paragraph{} In the context of ethnically divided societies, people’s ability to trust out-group members, seems like a possible solution to the deleterious consequences of voting along ethnic lines. Here, I theorize that voters who are able to trust individuals outside their identity group, as in voters with a larger radius of trust, are more likely to 1) incorporate information on competing candidates or parties into their voting decision (information receptivity mechanism) and 2) have confidence that non-co-ethnic voters will vote for qualified candidates or parties that will distribute public goods (collective action mechanism). \paragraph{} Before I explain the mechanisms on the relationship between radius of trust and voting behavior, I will define concepts relevant to the theory. First, radius of trust refers to the width of the “circle of people among whom cooperative norms are operative”\cite{fukuyamaTrustSocialVirtues2000}. When the radius of trust is narrow, trust is extended to people who are familiar to you including family members, neighbors, people you know or have met before, people of the same ethnicity, religion, age and so on. This type of trust is also known as particularized, in-group, specific, or “thick” trust\cite{zmerliSocialTrustAttitudes2008, delheyHowGeneralTrust2011}. A wide radius of trust, on the other hand, refers to trust in strangers and people whom we have little knowledge about. 
This type of trust is referred to as generalized, “impersonal” or “thin” trust\cite{gaidyteExplainingPoliticalParticipation2015}. According to Delhey, Newton and Welzel\cite{delheyHowGeneralTrust2011}, as the radius of trust increases so does the circle of cooperation. In contexts where ethnicity is a salient identity, “thick” trust is also referred to as in-group trust as in these settings friends and family members usually come from the same ethnic group. “Thin” trust, on the other hand, is often labeled as out-group trust or trust extended to those beyond one’s in-group members. In the next section, I present three possible mechanisms through which the radius of trust can influence the likelihood of voting along ethnic lines. \subsection{Why New Democracies?} In this paper, I test whether the relationship between outgroup trust and voting behavior travels across new democracies. The reason for focusing on new democracies is because they may be more prone and vulnerable to ethnic voting. In new democracies, weak opposition parties’ inability to credibly promise to enact policies drives clientelism\cite{keeferClientelismCredibilityPolicy2007a}. This leads to politicians distributing goods to targeted groups rather than providing public goods. In such an environment, voters are likely to respond to such appeals by voting along ethnic lines. \paragraph{} Furthermore, citizens in new democracies, according to Letki\cite{letkiTrustNewlyDemocratic2018}, tend to rely on in-group trust compared to those in consolidated democracies. While not always the case, new democracies with an authoritarian past experiencing transition to democracy and market economy tend to have on average high levels of trust towards immediate friends and family but low levels of trust towards strangers. They are less likely to take risks that involve trusting strangers and may on occasions try to exploit the “other” in worry that others do not share their values\cite{banfieldCorruptionFeatureGovernmental1975, uslanerCivicEngagementParticularized2016}. When people withdraw from wider contact, they will not be able to reap the benefits of social capital. They, according to Uslaner and Conley\cite{uslanerCivicEngagementParticularized2016}, may “at best become hermits isolated from civic engagement. At worst they might reinforce prejudices against strangers when they interact only with people like themselves” (333). Social isolation resulting from low levels of out-group trust can further divide societies, reinforce prejudices, and in some instances lead to conflict that can lead to the destabilization of democracy. \paragraph{} In general, new democracies, compared to consolidated democracies, may be more prone to voting along ethnic lines and have lower levels of out-group trust. This, however, is not always the case; there is still variation in the levels of ethnic voting and out-group trust among new democracies. Research shows that ethnicity is not always the key predictor of voting behaviors\cite{horowitzEthnicitySwingVote2019, basedauEthnicityPartyPreference2011, houleStructureEthnicInequality2018} and that outgroup trust is not always low in all new democracies. According to Inglehart\cite{inglehartTrustWellbeingDemocracy1999a}, people living in countries with legacies of oppression are less likely to trust their fellow citizens nor participate in civic life. 
\paragraph{}
In addition to variation in ethnic voting and outgroup trust levels, I have reason to believe social trust is a stronger predictor of voting behavior in new democracies than in established democracies. Research focusing on the differences between new and consolidated, historically Western, democracies finds that, unlike consolidated democracies with effective and responsive political institutions, states transitioning to democracy often suffer from institutional deficiencies early in their tenure\cite{huntingtonHowCountriesDemocratize1991, sorensenDemocracyDemocratizationProcesses2008}. In places where institutions are well-developed, as in the case of most consolidated democracies, people may be less reliant on social trust to navigate the world. Well-developed institutions, free of corruption and discrimination, can help people live their day-to-day lives without the fear of being cheated and taken advantage of. In countries with underdeveloped institutions, on the other hand, people may need to rely instead on the help of their community, which includes strangers, to navigate their daily lives. Furthermore, in consolidated democracies, it will be harder to tease apart the relationship between outgroup trust, institutional quality, and voting behavior, as it is uncertain which factors reinforce which. But given the generally low quality of institutions and low levels of out-group trust in new democracies, it would be easier to tease apart the true effect of out-group trust on ethnic voting. As a result, given this institutional weakness, I argue that outgroup trust is going to be a strong predictor of voting behavior across new democracies.
\subsection{Theory on Outgroup Trust and Voting Behavior}
In this section, I propose two mechanisms that explain the relationship between outgroup trust and voting behavior in ethnically salient contexts. They are the information receptivity mechanism and the collective action mechanism.
\subsubsection{Mechanism 1: Information receptivity mechanism}
The first mechanism that explains the relationship between the radius of trust and the likelihood of voting along ethnic lines is the voter’s propensity to consider different types of candidate information. This cognitive explanation hypothesizes that the radius of trust determines whether a voter, when receiving information about co-ethnic and non-co-ethnic candidates or parties, credits or discredits that information. This explanation adds to the information and accountability literature, which examines the effect of electoral information on voting behavior. The general argument in that literature is that access to additional information on politics, by cultivating a more informed electorate, may help reduce the salience of ethnic identity divisions in democratic politics.
\paragraph{}
While there is evidence that negative information on co-ethnic candidates or parties will dampen co-ethnic voter support\cite{conroy-krutzInformationEthnicPolitics2013}, there is also evidence that voters selectively choose the information they want to consider when making their vote choice\cite{adidaReducingReinforcingInGroup2017}.
Contrary to the general expectation that increased information about candidate quality will reduce the importance of ethnicity in shaping one’s overall voting decision, Adida and co-authors instead find that voters engage in ethnically motivated reasoning where they consider positive information about co-ethnics as relevant and negative information as irrelevant to their vote choice. The opposite was true for non-co-ethnic members where voters considered positive information as irrelevant while negative information was relevant to their vote choice. In general, voters appear to choose what information they incorporate into their vote preferences based on their ethnic group membership. \paragraph{} I argue that increasing out-group trust can dampen people’s desire to engage in ethnically motivated reasoning and instead incorporate negative (positive) information on co-ethnic (non-co-ethnic) candidates or parties more seriously in their voting decisions. When trust is extended to those beyond their in-group members, people may deem non-co-ethnic individuals as trustworthy and honest (i.e., as someone who would not betray them). For individuals with high levels of out-group trust, positive information about non-co-ethnic candidates presents useful and believable information to consider when determining who to vote for, because the individual deems the non-co-ethnic members to be trustworthy and honest. As a result, these individuals, when provided both positive and negative information on co-ethnic and non-co-ethnic candidates, are more likely to consider all types of information when making their vote choice. With all the information they have on co-ethnic and non-co-ethnic candidates, they will be able to vote for a more qualified candidate with higher accuracy. As a result, voters with high out-group trust (i.e., voters with a wider radius of trust) are less likely to engage in ethnic voting than the low out-group trusting voters (i.e., voters with a narrower radius of trust), who are more likely to engage in ethnically motivated reasoning. \paragraph{} The information receptivity mechanism is somewhat overlapping with the network mechanism, but I believe the two are conceptually distinct. For example, it may be case where one has a fairly homogenous network, but they may still be willing to accept information from a non-co-ethnic they encounter due to pre-existing levels of out-group trust (information receptivity mechanism). Or it may be the case that one encounters more kinds of information because they have a heterogenous network and high levels of out-group trust (network mechanism). On the other hand, it could be the case that those with a diverse network can still be prejudiced against information coming from a non-co-ethnic due to low levels of out-group trust. \begin{figure}[H] \centering \includegraphics[scale=0.35]{Mech2A} \end{figure} \subsubsection{Mechanism 2: Collective action mechanism} The second mechanism through which out-group trust can discourage voting along ethnic lines is through its effect on a voter’s perception that non-co-ethnic and co-ethnic voters will elect politicians that are qualified and distribute public goods. This mechanism differs from the two previous mechanisms because it is not a story of information but rather about individuals’ expectations about others’ voting behavior. 
This is similar to the ‘strategic selection mechanism’ theorized by Habyarimana and co-authors\cite{habyarimanaWhyDoesEthnic2007}, who argue that there are higher levels of public goods provision in ethnically homogenous communities because there exists a norm that cooperation among co-ethnics should be reciprocated and defections should be sanctioned. This theory assumes that in ethnically diverse societies, on the other hand, public goods provision is low as there is no unified norm of cooperation and sanctions. \paragraph{} The collective action mechanism proposed here frames individuals’ actions as also based on their expectations of others’ voting behavior, but not necessarily based on existing norms. I argue that individuals who extend trust towards non-co-ethnics are more likely to believe their out-group counterpart will cooperate and not defect in their voting decisions. Voters, when calculating their voting strategy, consider not only the competence of the candidates or parties, but also the strategy of fellow voters. They want their votes to contribute to the overall outcome and are likely to cast their vote for a candidate who is likely to win and likely to benefit the voter after elections as a result. When considering the strategic characteristic of voters, how individuals view others, and their intentions becomes crucial for one’s vote choice. According to Keefer, Scartascini, and Vlaicu\cite{keeferSocialTrustElectoral2019}, social trust is important in increasing the quality of government because it lowers the cost of collective action of demanding a better government. They argue that if voters can trust the other to contribute to the collective good of monitoring and expelling poorly performing incumbents, there is a higher incentive for individual voters to vote for qualified candidates or parties. Furthermore, the ethnic voting literature suggests negative evaluations of non-co-ethnics play an important role in motivating voting behavior. Across contexts, scholars have found evidence that prejudice and fears about the out-group plays a motivating role in co-ethnic voting\cite{kinderPrejudicePoliticsSymbolic1981, longDeterminantsEthnicVoting2012}. This is especially the case when there is social, political, and economic inequality across groups\cite{batesEthnicCompetitionModernization1974}. In such contexts, individuals who hold prejudicial views are less likely to support policies that benefit the out-group\cite{snidermanFallacyDemocraticElitism1991}. This complements the idea that ethnic voting tends to be prevalent in contexts where the other cannot be trusted, and thus that individuals will always vote in a way that disfavors the out-group rather than pursuing tactics that benefit the country as a whole. \paragraph{} Based on Keefer, Scartascini and Vlaicu\cite{keeferSocialTrustElectoral2019}, individuals with high levels of out-group trust are more likely to be optimistic about a non-co-ethnic voter’s openness to the idea of voting for a qualified candidate. High out-group trusters, compared to low out-group trusters, will have lower prejudice towards out-group members, and thus will tend not to think about politics from an “Us vs. Them” perspective. Rather, they are more likely to focus on what benefits not only their group, but the country as whole. In competitive electoral contexts, individuals, when thinking about their vote choice, are going to simultaneously think about who out-group members will vote for. 
Out-group trusters are more likely to think of others as allies rather than enemies, as people who may hold the same mindset as themselves. With this mindset, they may predict that non-co-ethnic voters are less likely to vote along ethnic lines and will instead vote for a competent candidate or party. As a result, such an individual is less likely to engage in ethnic voting than individuals with low levels of out-group trust. In other words, when voters are able to trust that non-co-ethnics will incur some of the costs of contributing to the collective good of keeping qualified candidates in power and expelling poorly performing ones, they are more likely to vote for the more qualified candidate, regardless of ethnicity.
\begin{figure}[H]
\centering
\includegraphics[scale=0.35]{Mech3A}
\end{figure}
\subsection{Hypotheses}
Based on the theoretical discussion above, I propose three hypotheses.\\
\\\textbf{H1.} Individuals with a \textit{high} level of out-group trust are \textit{less} likely to vote for a \textit{co-ethnic} candidate.\\
\\\textbf{H2.} Individuals with a \textit{high} level of out-group trust and a \textit{high} propensity to consider different types of candidate information are \textit{less} likely to vote for a \textit{co-ethnic} candidate.\\
\\\textbf{H3.} Individuals with a \textit{high} level of out-group trust and a \textit{high} level of information on the voting decisions of in-group and out-group members are \textit{less} likely to vote for a \textit{co-ethnic} candidate.\\
\section{Research Design}
\subsection{Data}
To test the relationship between out-group trust and voting behavior among individuals in new democracies, I use data from the Afrobarometer survey and the Global Leadership Project. I chose to examine new democracies in Africa because these countries are not only ethnically diverse but are also ones in which ethnicity is a salient identity for political mobilization. Here, I employ the definition of new democracies used by Grewal and Voeten\cite{grewalAreNewDemocracies2015}, which includes countries that have had a Polity IV score of 6 or higher for less than 30 consecutive years. As of 2005, there are 10 new democracies in Africa covered by Wave 3 (2005) of the Afrobarometer survey that fit this definition\footnote{New democracies included in Wave 3 of the Afrobarometer survey are Benin, Botswana, Ghana, Kenya, Madagascar, Malawi, Mali, Namibia, Senegal, and South Africa}. I based this study on data from Afrobarometer Wave 3 because this survey wave included questions on out-group/interethnic trust and voting behavior at the individual level.
\paragraph{} The Global Leadership Project (GLP) is a dataset that offers biographical information on leaders throughout the world, including members of the executive, the legislature, the judiciary, and other elites who hold informal power\cite{gerringWhoRulesWorld2019}. This dataset includes information on the ethnicity of leaders, which is used as part of measuring ethnic voting.
\subsubsection{Dependent Variable}
The main outcome variable is ethnic voting at the individual level. Here, ethnic voting is a dichotomous variable, operationalized as whether or not an individual voted for a co-ethnic candidate. To measure ethnic voting, the ethnicity of presidential candidates was matched with the respondent's ethnicity. Based on the Afrobarometer question “If a presidential election were held tomorrow, which party's candidate would you vote for?”, I made a list of the presidential candidates or party leaders representing these parties of choice.
Then the ethnicity of each party's presidential candidate or leader was identified using the GLP dataset. For those whose information was not available in the GLP dataset, I either located their ethnicity through a web search or left it blank. The errors that can arise from this coding are discussed later in the limitations section. Once the ethnicities of the presidential candidates or party leaders were identified, I matched them with the respondent's ethnicity, as provided by the Afrobarometer. Respondents who voted for a co-ethnic candidate were coded as 1 and those who voted for a non-co-ethnic candidate as 0.
\subsubsection{Independent Variables}
The main independent variable is out-group trust. Out-group trust is operationalized as the level of trust an individual has towards non-co-ethnics. This is measured using responses to the Afrobarometer question, "How much do you trust each of the following types of people? Kenyans from other ethnic groups." This is an ordinal variable, with responses ranging from 0 (not at all) to 3 (I trust them a lot).
\paragraph{} To account for the information receptivity mechanism, I included a variable measuring how much news respondents get from different media sources. The Afrobarometer includes questions asking respondents, "How often do you get news from the following sources? Radio; Television; Newspaper." The responses range from 0 (never) to 4 (every day), and I compiled the three questions into one measure by adding the responses together, so that lower values indicate low media access and higher values greater media access. I assume that respondents with higher levels of out-group trust are more open to a wider variety of political information and thus will seek it from a variety of news sources. This question is not a direct measure of exposure to diverse information, which means results must be interpreted with caution. Using this measure, I examine the interaction effect of out-group trust and information access, and how this affects voting behavior.
\paragraph{} To test the collective action mechanism, a measure of an individual's frequency of political discussion with friends and family was included. The Afrobarometer includes a question asking, "When you get together with your friends or family, would you say you discuss political matters?" The responses range from 0 (never) to 2 (frequently). Here, I assume that those who discuss political matters frequently are more likely to share their vote choice with friends and family than those who do not. As a result, those who actively discuss political matters are more likely to be knowledgeable about the vote choices of others, which in turn will influence their own vote choice. Again, this is not a direct measure of one's knowledge of the vote choices of others; therefore, results based on this measure must be interpreted with caution. Using this measure, I look at the interaction effect of out-group trust and active political discussion, and how this influences the respondent's voting behavior.
\subsection{Empirical Strategy}
A multilevel model is used to examine the relationship between out-group trust and ethnic voting, as I expect voting behavior at the individual level to be influenced by individual-, regional-, and country-level characteristics. Here, level-1 observations (individuals) are assumed to be nested within level-2 units (countries).
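\paragraph{} As a rough sketch of this specification (using the individual-level controls introduced below), the estimation can be illustrated as follows. All column names are hypothetical, and the country-level random intercept of the multilevel model is approximated here with country dummies for brevity; this is an illustration, not the exact estimation code used for the analysis.
\begin{verbatim}
# Sketch of the individual-level specification (hypothetical column names).
import pandas as pd
import statsmodels.formula.api as smf

df = pd.read_csv("afrobarometer_wave3_merged.csv")  # hypothetical merged file

# Logistic model of co-ethnic voting; C(country) stands in for the
# country-level (level-2) random intercept of the multilevel model.
fit = smf.logit(
    "ethnic_vote ~ ietrust + age + C(gender) + education"
    " + econ_status + pol_trust + C(country)",
    data=df,
).fit()
print(fit.summary())
\end{verbatim}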
To account for such characteristics, I include control variables at the individual (level-1) and country (level-2) levels. At the individual level, I control for five factors: age, gender, education, economic status, and political trust. I include political trust in the model because previous studies find a significant relationship between out-group trust and confidence in institutions\cite{caoIngroupOutgroupForms2015}, and I suspect political trust to have an effect on voting behavior, as people with greater confidence in institutions are less likely to rely on informal cues such as ethnicity when making their vote choice.
\paragraph{} At the country level, I control for three factors: GDP, years of democracy, and ethnic voting at the country level. For the latter I use Huber's\cite{huberMeasuringEthnicVoting2012} Group Voting Fractionalization measure, which captures the electoral distance between any two groups. The measure ranges from 0 to 1, where 1 indicates maximal distance, with all of \textit{i}'s supporters drawn from one group and all of \textit{j}'s supporters from a different group. Figures on GDP and years of democracy for 2005 were obtained from the World Bank and the Polity IV dataset. Country-level ethnic voting is included because I suspect out-group trust to be low in countries where politics is severely divided along ethnic lines, and individual voting behavior to be influenced by the voting pattern within the country.
\section{Results}
\subsection{Summary Statistics}
\begin{table}[H]
\setlength{\arrayrulewidth}{1mm}
\setlength{\tabcolsep}{18pt}
\renewcommand{\arraystretch}{2}
\Huge
\centering
\resizebox{\textwidth}{!}{%
\begin{tabular}{|c|c|c|c|c|c|c|c|c|c|c|c|c|}
\hline
Variable & n & mean & sd & median & trimmed & mad & min & max & range & skew & kurtosis & se\\
\hline
\multicolumn{13}{|c|}{Individual Level Variables}\\
\hline
Ethnic Voting & 9505 & 0.33 & 0.47 & 0 & 0.29 & 0.00 & 0 & 1 & 1 & 0.73 & -1.47 & 0.00\\
\hline
Out-group Trust & 9338 & 1.43 & 1.03 & 1 & 1.41 & 1.48 & 0 & 3 & 3 & 0.13 & -1.13 & 0.01\\
\hline
Age & 9415 & 37.46 & 14.93 & 35 & 35.87 & 14.83 & 18 & 115 & 97 & 0.89 & 0.30 & 0.15\\
\hline
Gender & 9505 & 1.48 & 0.50 & 1 & 1.48 & 0.00 & 1 & 2 & 1 & 0.07 & -2.00 & 0.01 \\
\hline
Education & 9476 & 3.01 & 2.02 & 3 & 2.95 & 1.48 & 0 & 9 & 9 & 0.19 & -0.41 & 0.02\\
\hline
Economic Status & 9462 & 2.11 & 1.40 & 2 & 2.14 & 1.48 & 0 & 4 & 4 & 0.05 & -1.28 & 0.01 \\
\hline
Political Trust & 8881 & 1.87 & 1.07 & 2 & 1.97 & 1.48 & 0 & 3 & 3 & -0.47 & -1.06 & 0.01\\
\hline
\multicolumn{13}{|c|}{Country Level Variables}\\
\hline
GDP & 9505 & 2269.64 & 2215.20 & 822.46 & 2111.72 & 745.63 & 289.56 & 5513.33 & 5223.77 & 0.56 & -1.54 & 22.72\\
\hline
Democratic Yrs & 9505 & 11.89 & 10.60 & 11.00 & 9.84 & 5.93 & 1.00 & 39.00 & 38.00 & 1.63 & 1.95 & 0.11\\
\hline
Country Ethnic Voting & 9505 & 0.18 & 0.08 & 0.16 & 0.17 & 0.07 & 0.06 & 0.33 & 0.27 & 0.58 & -1.32 & 0.03\\
\hline
\end{tabular}%
}
\caption{Summary Statistics for Individual and Country Level Variables}
\end{table}
\paragraph{} Table 1 reports the summary statistics for both the individual- and country-level variables included in the model. At the individual level, there were a total of 9505 respondents who said they would vote for a party if a presidential election were held tomorrow. For \textit{ethnic voting}, the main outcome variable, the responses were dichotomous: 0 was assigned to those who voted for a non-co-ethnic candidate and 1 to those who voted for a co-ethnic candidate.
The average response score was 0.33, meaning that more respondents voted for a non-co-ethnic candidate than for a co-ethnic one. Regarding \textit{out-group trust}, the responses ranged from 0 to 3, where 0 signified no trust and 3 a lot of trust. The mean score was 1.43, suggesting that the average respondent expressed only limited trust towards non-co-ethnics (between “just a little” and “somewhat”). The average respondent was in their \textit{mid-thirties}, \textit{male}, \textit{had received some primary schooling}, and \textit{had gone without a cash income once or twice in the past year}. Lastly, respondents' level of \textit{political trust} was on the higher end, with the majority saying they somewhat trusted their country's Parliament/National Assembly.
\paragraph{} The ten Sub-Saharan new democracies varied in their levels of GDP per capita, years of democracy, and ethnic voting. Malawi had the lowest GDP per capita and Botswana the highest. For years of democracy, I subtracted from 2005 the year in which a country's Polity score first reached 6. Mali was the youngest democracy with a value of 1 and Botswana the oldest with a value of 39. Lastly, countries also varied in their ethnic voting scores, with Senegal having the lowest level of ethnic voting (0.059) and Kenya the highest (0.332).
\subsection{Results from Multilevel Analysis}
\begin{table}[H]
\centering
\includegraphics[scale=0.5]{maintrust}
\caption{Results for Out-Group Trust and Ethnic Voting}
\end{table}
\paragraph{} Table 2 presents the results of the multilevel analysis. Overall, the results show strong support for the first hypothesis and weak support for the second hypothesis, but no support for the third hypothesis.
\paragraph{} Comparing models 1, 2, and 3, I find that the variable measuring out-group trust (\textit{ietrust}) remains significant even after including individual- and country-level covariates. The coefficients across the models are negative and statistically significant, meaning that my first hypothesis is supported. According to these models, individuals with a \textit{high} level of out-group trust are less likely to vote for a \textit{co-ethnic} candidate, and individuals with a \textit{low} level of out-group trust are more likely to vote for a \textit{co-ethnic} candidate. Furthermore, using an ANOVA test (Table 3), I compare the deviance statistics of model 3 \textit{without} (Model 3a) and \textit{with} (Model 3b) the \textit{ietrust} variable. The deviance statistic decreases significantly after including the \textit{ietrust} variable, indicating a better-fitting model.
\begin{table}[H]
\setlength{\arrayrulewidth}{1mm}
\setlength{\tabcolsep}{18pt}
\renewcommand{\arraystretch}{2}
\Huge
\centering
\resizebox{\textwidth}{!}{%
\begin{tabular}{|c|c|c|c|c|c|c|c|c|}
\hline
Model & npar & AIC & BIC & logLik & deviance & Chisq & Df & Pr(>Chisq) \\
\hline
Model 3a & 10 & 10194 & 10265 & -5087.1 & 10174 & & & \\
\hline
Model 3b & 11 & 10187 & 10265 & -5082.5 & 10165 & 9.2457 & 1 & 0.002361 ***\\
\hline
\multicolumn{9}{|c|}{Note: *p$<$0.1; **p$<$0.05; ***p$<$0.001}\\
\hline
\end{tabular}%
}
\caption{ANOVA Test Comparing Models With and Without Out-Group Trust}
\end{table}
\paragraph{} Here, I also find the age, gender, education, and political trust variables to have a significant effect on voting behavior across individuals. Those who are \textit{older} and \textit{female} tend to be more likely to vote for a co-ethnic candidate.
Moreover, individuals with fewer years of education also tend to vote for a co-ethnic candidate. Interestingly, economic status had no impact on whether an individual votes along ethnic lines. Individuals' trust in the Parliament or National Assembly also had an effect on the level of ethnic voting: those with higher levels of political trust were more likely to vote for a co-ethnic candidate than those with lower levels of political trust. None of the country-level variables, however, are significant. I suspect that this is due to the small number of countries included in the model.
\paragraph{} Model 4 tests the second hypothesis, which states that individuals with a \textit{high} level of out-group trust and a \textit{high} propensity to consider different types of candidate information are \textit{less} likely to vote for a \textit{co-ethnic} candidate. By interacting the variables \textit{ietrust} and \textit{information acc}, I test whether individuals with high levels of out-group trust \textit{and} greater exposure to information through media are less likely to vote along ethnic lines. Results show that the interaction term \textit{ietrust:information acc} is positive and significant. However, coefficients on interaction terms are hard to interpret at face value. As a result, I examine how the effect of out-group trust on ethnic voting differs across levels of information access.
\begin{figure}[H]
\centering
\includegraphics[scale=0.65]{PPEV1 copy}
\caption{Predicted probabilities for Ethnic Voting with Interactive Term (ietrust*information acc) }
\end{figure}
\paragraph{} Figure 1 visualizes how the outcome (ethnic voting) changes with the main independent variable (out-group trust) at different levels of information access in a model with an interaction term. We can see from the figure that each regression line has a different slope for each level of media access. The green line indicates the effect of out-group trust on ethnic voting among individuals with the highest level of access to information via media, the blue line among those with a medium level of information access, and the red line among individuals with the lowest level of information access. The red line has the steepest slope whereas the green line has the flattest slope. This indicates that out-group trust tends to have the greatest effect on ethnic voting among those with the lowest level of information access (red), followed by those with medium access (blue), and then those with the highest access (green). The results provide mixed support for my hypothesis: those with medium (blue) and low (red) levels of information access tend to behave in the hypothesized direction, but for those with high levels of information access out-group trust has a negligible effect on ethnic voting. For those with the highest level of information access (green), there was no difference in the level of ethnic voting between those with low and high levels of out-group trust. Rather, it was the individuals with low levels of information access who showed the largest difference in levels of ethnic voting. A probable explanation for this result is that too much information can have a backfiring effect on people's vote choices. It may be that too much information confuses voters, who in turn resort to basic cues such as ethnicity instead of incorporating the information collected through the media into their vote choice.
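\paragraph{} As a minimal sketch of how such predicted probabilities can be computed (again with hypothetical column names, reusing the data frame from the earlier sketch, and ignoring the multilevel structure for brevity), one can fit the interaction model and evaluate it on a grid of out-group trust values at low, medium, and high media access:
\begin{verbatim}
# Sketch: predicted probability of co-ethnic voting across out-group trust,
# holding the media-access index at low / medium / high values.
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

fit = smf.logit("ethnic_vote ~ ietrust * information_acc", data=df).fit()

grid = pd.DataFrame(
    [(t, a) for a in (0, 6, 12)               # media-access index: 0-12
            for t in np.linspace(0, 3, 31)],  # out-group trust: 0-3
    columns=["ietrust", "information_acc"],
)
grid["p_coethnic_vote"] = fit.predict(grid)   # probabilities to plot
\end{verbatim}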
\paragraph{} Next, model 5 finds no support for the third hypothesis, which states that individuals with a \textit{high} level of out-group trust and a \textit{high} level of information on the voting decisions of in-group and out-group members are \textit{less} likely to vote for a \textit{co-ethnic} candidate. When I include the \textit{pol discuss} variable in the model, \textit{ietrust} is no longer significant. Rather, I find that individuals who discuss politics more frequently with friends and family tend to vote for a co-ethnic candidate. I suspect that either the variable does not accurately capture political discussion with non-co-ethnics as well as co-ethnics, or that people in Sub-Saharan Africa rarely have political discussions with non-co-ethnics.
\section{Discussion and Conclusion}
In conclusion, the multilevel analysis finds a significant relationship between out-group trust and ethnic voting at the individual level. I find only limited support for the information receptivity mechanism: out-group trust seemed to have the greatest effect on ethnic voting among those with low exposure to information, followed by those with medium exposure. Contrary to my theory, out-group trust seemed to have a negligible effect among those with the highest exposure to information. Lastly, I find no significant support for the collective action mechanism.
\paragraph{} This study suffers from a number of limitations. First, the main outcome variable, ethnic voting, has two major drawbacks. To begin with, party leaders may have more than one ethnicity to which they can appeal\cite{adidaSpousalBumpCrossEthnic2016}. As a result, my simplified method of matching the candidate's primary ethnicity to that of the respondent can result in Type II errors (false negatives): voters could have responded to a candidate's secondary or spousal ethnic appeal, but I could have miscoded this as non-ethnic voting because of my focus on the candidate's primary ethnicity. The second issue with this measurement is that not all parties mobilize constituents along ethnic lines. While some parties may mobilize their constituents according to their ethnic identity, others may appeal to their voters on other grounds (e.g., income groups, ideology, policy, etc.). By treating all parties as ethnic, I could have committed Type I errors (false positives), miscoding a respondent's voting decision as ethnic voting in cases where a non-ethnic party's leader and the respondent happened to share an ethnicity.
\paragraph{} Another potential limitation of this study is the number of countries included. Since there are only ten observations at level 2, it is difficult to make meaningful comparisons across these countries. It is usually recommended that a multilevel model have at least 20 observations at each level to detect cross-level interactions\cite{kreftIntroducingMultilevelModeling1998}. This, however, was not possible for this round of analysis, as I wanted to use the Wave 3 Afrobarometer survey because it included questions that closely measured my independent variable, out-group trust. More recent waves of the Afrobarometer include a question asking, "For each of the following types of people, please tell me whether you would like having people from this group as neighbors, dislike it, or not care." While this does get at out-group trust somewhat, it is not a direct measure.
\paragraph{} This study makes a significant contribution to the literature on social capital and ethnic voting. While past studies have theorized and empirically demonstrated social capital's effect on political participation, this literature has yet to make a direct connection to voting in ethnically salient contexts. The results show that trust across ethnic groups has a significant effect on voting in places where politics is influenced by ethnic identity. Furthermore, this study also speaks to the ethnic voting literature, as trust across ethnic groups can explain some of the variation in the level of ethnic voting across individuals and countries. This suggests that trust across ethnic groups is taken into consideration when deciding whom to vote for in presidential elections across Sub-Saharan countries.
\paragraph{} To better understand the relationship between out-group trust and ethnic voting and its mechanisms, future research must do the following. First, ethnic voting at the individual level must be measured more accurately. As mentioned above, I should be able to distinguish between ethnic and non-ethnic parties, and better determine which ethnic groups the ethnic parties are mobilizing. Second, to expand the number of level-2 observations, I should determine whether the question on having non-co-ethnics as neighbors is a good proxy for out-group trust. If it turns out to be a good proxy, then recent waves of the Afrobarometer survey and World Values Survey data can be used to further examine the relationship between out-group trust and ethnic voting. Third, to better account for the mechanisms, I should develop better measures of information receptivity and of knowledge of the voting intentions of others. To measure whether an individual is being exposed to a wider variety of information on both co-ethnic and non-co-ethnic candidates, I should be able to determine whether the media sources they access are biased. It is possible that one is accessing media channels that are biased towards one's in-group; in that case, one could be exposed to more information, but it would be heavily biased, which may motivate one to vote along ethnic lines. To better account for the collective action mechanism, I need to find a better measure of one's knowledge of the voting intentions of both in-group and out-group members.
\pagebreak
\section{Appendix}
\subsection{Variable Measurement and Data Source}
Afrobarometer Wave 3 (2005)
\begin{itemize}
\item Voting: Q99 If a presidential election were held tomorrow, which party's candidate would you vote for?
\item Out-group trust: Q84D How much do you trust each of the following types of people: [Ghanaian/Kenyan/etc.] from other ethnic groups? (0=Not at all, 1=Just a little bit, 2=Somewhat, 3=A lot)
\item Age: Q1 How old are you?
\item Gender: Q101 Respondent's gender (1=Male, 2=Female)
\item Education: Q90 What is the highest level of education you have completed? (0=No formal schooling, 1=Informal schooling (including Koranic schooling), 2=Some primary schooling, 3=Primary school completed, 4=Some secondary school/High school, 5=Secondary school completed/High school, 6=Post-secondary qualifications other than university, e.g. a diploma or degree from a technical/polytechnic/college, 7=Some university, 8=University completed, 9=Post-graduate)
\item Economic status: Q8E Over the past year, how often, if ever, have you or anyone in your family gone without: A cash income?
(0=Never, 1=Just once or twice, 2=Several times, 3=Many times, 4=Always)
\item Political trust: Q55B How much do you trust each of the following, or haven’t you heard enough about them to say: The Parliament/National Assembly? (0=Not at all, 1=Just a little bit, 2=Somewhat, 3=A lot)
\item Information access: Q15(A-C) How often do you get news from the following sources? Radio; Television; Newspaper (0=Never, 1=Less than once a month, 2=A few times a month, 3=A few times a week, 4=Every day). I added the responses from the three questions.
\item Political discussion: Q17 When you get together with your friends or family, would you say you discuss political matters? (0=Never, 1=Occasionally, 2=Frequently)
\item Majority: Q79 What is your tribe? You know, your ethnic or cultural group.
\item In-group attachment: Q84C How much do you trust each of the following types of people: People from your own ethnic group?
\item Proximity to the capital: square root of the distance from the respondent's region of residence to the country's capital.
\end{itemize}
World Bank
\begin{itemize}
\item GDP per capita: log of GDP per capita in 2005.
\end{itemize}
Polity IV dataset
\begin{itemize}
\item Years of democracy: log of the number of years since the country's Polity score rose from below 6 to 6 or above.
\end{itemize}
Huber's\cite{huberMeasuringEthnicVoting2012} ethnic voting dataset
\begin{itemize}
\item Group Voting Fractionalization: log of the value measuring the electoral distance between any two groups. The measure ranges from 0 to 1, where 1 indicates maximal distance, with all of \textit{i}'s supporters drawn from one group and all of \textit{j}'s supporters from a different group.
\end{itemize}
\subsection{Additional Models}
\begin{figure}[H]
\centering
\includegraphics[scale=0.55]{additionalmodel}
\end{figure}
\bibliographystyle{plain}
\bibliography{Diss}
\end{document}
{ "alphanum_fraction": 0.8134650837, "avg_line_length": 231.4163822526, "ext": "tex", "hexsha": "e7cb32e9a870b07f35759466c7f620d84f82d7e1", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "a98cd27b914bd0ce15885a45883bb42e6aee66f6", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "hyowonshin/hyowonshin.github.io", "max_forks_repo_path": "files/trust.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "a98cd27b914bd0ce15885a45883bb42e6aee66f6", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "hyowonshin/hyowonshin.github.io", "max_issues_repo_path": "files/trust.tex", "max_line_length": 2242, "max_stars_count": null, "max_stars_repo_head_hexsha": "a98cd27b914bd0ce15885a45883bb42e6aee66f6", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "hyowonshin/hyowonshin.github.io", "max_stars_repo_path": "files/trust.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 14797, "size": 67805 }
%------------------------------------------------------------------------------
\chapter{Submitting your thesis}
\label{sec:submit}\index{submit}
%------------------------------------------------------------------------------

\LaTeX{} file: \url{./guide_submit.tex}\\[1ex]
\noindent
Questions often come up once your thesis is finished and you have to print
and submit it. Both the \foreignquote{ngerman}{Promotionsbüro} and the
\foreignquote{ngerman}{Prüfungsamt} have instructions on what you have to do,
but it is sometimes not clear what this means in terms of the cover pages
offered by this thesis framework.

For the printed version of your thesis, you probably want \Package{hyperref}
links and the table of contents to be black. In order to do this, you should
uncomment the \Macro{hypersetup} command that is in the thesis main file,
just after the \Macro{usepackage}\texttt{\{thesis\_defs\}}.

%------------------------------------------------------------------------------
\section{PhD thesis}
\label{sec:submit:phd}\index{submit!PhD thesis}\index{PhD}

\input{./guide_phdsubmit}

Appendix~\ref{app:printer} suggests some print shops that can make copies of
your thesis in good enough quality to be accepted by the university library.

%------------------------------------------------------------------------------
\section{Master/Diplom/Bachelor thesis}
\label{sec:submit:other}\index{submit!MSc thesis}\index{submit!Diplom thesis}\index{submit!BSc thesis}

\subsection{Submission}

\begin{enumerate}
\item Use the file \texttt{Master\_Submit\_Title.tex},\index{MSc}
  \texttt{Diplom\_Submit\_Title.tex}\index{Diplom} or\\
  \texttt{Bachelor\_Title.tex}\index{BSc} for the title pages.
  These are selected by passing one of the options \Option{Master},
  \Option{Diplom} or \Option{Bachelor} to the \Macro{documentclass} or the
  \Package{ubonn-thesis} package.
  In addition, pass the option \Option{Submit} to the \Macro{documentclass}
  or the \Package{ubonn-thesis} package.
\item You have to print and bind three copies of your thesis to be submitted
  to the \foreignlanguage{ngerman}{Prüfungsamt}. Nowadays these are usually
  in colour.
\item The first and second referees for your thesis often like to have an
  extra copy so that they can make comments while they read it -- ask them
  if they want one. You can usually save the institute some money and print
  these copies in black \& white.
\end{enumerate}

Note that a CV does not have to be included in a Master/Diplom/Bachelor
thesis. This is only needed when you submit a PhD thesis.

\subsection{MSc/Diplom theses for the department library}

\begin{enumerate}
\item Use the file \texttt{Master\_Cover.tex} for the cover and
  \texttt{Master\_Final\_Title.tex}\footnote{Replace Master with Diplom as
    appropriate.} for the title pages. These are selected by passing the
  options \Option{Master, PILibrary}\footnote{%
    or \Option{Diplom, PILibrary}} to the \Macro{documentclass} or the
  \Package{ubonn-thesis} package.
\item There are probably some small corrections you or the referees found
  during the time between submission and the completion of the referees'
  reports and grades. These should be corrected before you submit your
  thesis to the department library.
\item The department library\index{department library}\index{library!department}
  (in the Physikalisches Institut) needs 2 printed copies with the file\\
  \texttt{Master\_Cover.tex} as the cover.
This is selected by passing the options \Option{Master, PILibrary} to the \Macro{documentclass} or the \Package{ubonn-thesis} package. You have to get the \enquote{BONN-IB-YYYY-nnn} number from the librarian. You should also include an abstract (in English) on the cover page. \item This version of the thesis is the one that you usually print if you need extra copies for your experiment or research group. \end{enumerate} \subsection{BSc theses} \begin{enumerate} \item There are probably some small corrections you or the referees found during the time between submission and the completion of the referees' reports and grades. These should be corrected before you print some extra copies of your thesis if your group wants them. \end{enumerate} %%% Local Variables: %%% mode: latex %%% TeX-master: "./thesis_guide" %%% End:
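For concreteness, the option combinations described in this chapter would be passed roughly as follows (a minimal sketch: only the package name and option names come from this guide, the rest of your preamble is not shown):
\begin{verbatim}
% Submission version of a Master thesis:
\usepackage[Master,Submit]{ubonn-thesis}

% Version for the department library (with Master_Cover.tex as cover):
%\usepackage[Master,PILibrary]{ubonn-thesis}
\end{verbatim}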
{ "alphanum_fraction": 0.7136948529, "avg_line_length": 42.6666666667, "ext": "tex", "hexsha": "4f2cbd484c49426b05454740ef8d50acac2ce3da", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "b29c84f9a29e4a7c9a3499658a1dfa7f87d64c9c", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "cmp0xff/Masterarbeit", "max_forks_repo_path": "ubonn-thesis-current/guide/guide_submit.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "b29c84f9a29e4a7c9a3499658a1dfa7f87d64c9c", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "cmp0xff/Masterarbeit", "max_issues_repo_path": "ubonn-thesis-current/guide/guide_submit.tex", "max_line_length": 102, "max_stars_count": null, "max_stars_repo_head_hexsha": "b29c84f9a29e4a7c9a3499658a1dfa7f87d64c9c", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "cmp0xff/Masterarbeit", "max_stars_repo_path": "ubonn-thesis-current/guide/guide_submit.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1070, "size": 4352 }
\clearpage \subsection{Function} % (fold) \label{sub:function} \begin{figure}[h] \includegraphics[width=\textwidth]{topics/function-decl/diagrams/Function} \caption{Concepts related to Functions} \label{fig:function-decl-function} \end{figure} % subsection function (end)
{ "alphanum_fraction": 0.7689530686, "avg_line_length": 23.0833333333, "ext": "tex", "hexsha": "c0d239deb96b99a28a11305958969bf03a91861a", "lang": "TeX", "max_forks_count": 6, "max_forks_repo_forks_event_max_datetime": "2022-03-24T07:42:53.000Z", "max_forks_repo_forks_event_min_datetime": "2020-06-02T03:18:37.000Z", "max_forks_repo_head_hexsha": "8f3040983d420129f90bcc4bd69a96d8743c412c", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "macite/programming-arcana", "max_forks_repo_path": "topics/function-decl/concepts/function.tex", "max_issues_count": 1, "max_issues_repo_head_hexsha": "bb5c0d45355bf710eff01947e67b666122901b07", "max_issues_repo_issues_event_max_datetime": "2021-12-29T19:45:10.000Z", "max_issues_repo_issues_event_min_datetime": "2021-12-29T19:45:10.000Z", "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "thoth-tech/programming-arcana", "max_issues_repo_path": "topics/function-decl/concepts/function.tex", "max_line_length": 75, "max_stars_count": 1, "max_stars_repo_head_hexsha": "bb5c0d45355bf710eff01947e67b666122901b07", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "thoth-tech/programming-arcana", "max_stars_repo_path": "topics/function-decl/concepts/function.tex", "max_stars_repo_stars_event_max_datetime": "2021-08-10T04:50:54.000Z", "max_stars_repo_stars_event_min_datetime": "2021-08-10T04:50:54.000Z", "num_tokens": 75, "size": 277 }
\documentclass{beamer} \usepackage{beamerthemesplit} \title{Example Presentation Created with the Beamer Package} \author{Till Tantau} \date{\today} \begin{document} \frame{\titlepage} \section[Outline]{} \frame{\tableofcontents} \section{Introduction} \subsection{Overview of the Beamer Class} \frame { \frametitle{Features of the Beamer Class} \begin{itemize} \item<1-> Normal LaTeX class. \item<2-> Easy overlays. \item<3-> No external programs needed. \end{itemize} } \end{document}
{ "alphanum_fraction": 0.73046875, "avg_line_length": 17.6551724138, "ext": "tex", "hexsha": "771d15921fd285af26967edbf3a8cbe485a31628", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "1843078edb13e647c0261d1944320ffbcf02ad99", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "sshrdp/mclab", "max_forks_repo_path": "languages/AspectMatlab/presentations/aosdWorkshop/test/test.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "1843078edb13e647c0261d1944320ffbcf02ad99", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "sshrdp/mclab", "max_issues_repo_path": "languages/AspectMatlab/presentations/aosdWorkshop/test/test.tex", "max_line_length": 60, "max_stars_count": 3, "max_stars_repo_head_hexsha": "1843078edb13e647c0261d1944320ffbcf02ad99", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "sshrdp/mclab", "max_stars_repo_path": "languages/AspectMatlab/presentations/aosdWorkshop/test/test.tex", "max_stars_repo_stars_event_max_datetime": "2020-06-07T02:06:09.000Z", "max_stars_repo_stars_event_min_datetime": "2017-07-24T23:54:17.000Z", "num_tokens": 148, "size": 512 }
\documentclass[main.tex]{subfiles}
\begin{document}

\marginpar{Wednesday\\ 2020-12-9, \\ compiled \\ \today}

% We were discussing how the envelope of the neutron star can be treated in the plane-parallel approximation.
% The conductivity tensor is
% %
% \begin{align}
% k'_{ij} = \left[\begin{array}{cc}
% k_\parallel & 0 \\
% 0 & k_\perp
% \end{array}\right]
% \,,
% \end{align}
% %
% and so

Therefore, the components of the tensor in the unprimed frame are
%
\begin{align}
k_{ij} = \left[\begin{array}{cc}
k_\parallel \cos^2 \phi + k_\perp \sin^2\phi & (k_\parallel - k_\perp) \sin \phi \cos \phi \\
(k_\parallel - k_\perp) \sin \phi \cos \phi & k_\parallel \sin^2 \phi + k_\perp \cos^2\phi
\end{array}\right]
\,,
\end{align}
%
which will be used in the law
%
\begin{align}
q_i = - \sum _{j} k_{ij} \pdv{T}{x_j}
\,.
\end{align}

Since we are interested in the observable emission, let us consider \(q_z = q_2\), which is given by
%
\begin{align}
q_z = - k_{21} \pdv{T}{x} - k_{22} \pdv{T}{z}
\,,
\end{align}
%
and we know that this corresponds to blackbody emission: \(q_z = \sigma T^{4}\).

Now, as rough estimates, if \(L\) is the depth of the crust and \(R\) is the radius of the NS, we can take \(\pdv*{T}{z} \approx (T_s - T_c ) / L\), while \(\pdv*{T}{x} \approx - T_s/R\).
We know that \(T_s \ll T_c \), and \(L \ll R\): then,
%
\begin{align}
\abs{ \pdv{T}{z}} \approx \abs{- \frac{T_c}{L} } \gg \abs{\pdv{T}{x}}
\,.
\end{align}

So, keeping only the most significant term, we get
%
\begin{align}
\sigma T^{4} &= - k_{22} \pdv{T}{z} = - \qty( k_\parallel \sin^2 \phi + k_\perp \cos^2\phi) \pdv{T}{z}  \\
&= \underbrace{- k_\parallel \pdv{T}{z}}_{\sigma T_P^{4}} \qty( \sin^2 \phi + \frac{k_\perp}{k_\parallel} \cos^2 \phi )
\,,
\end{align}
%
where we defined the new temperature \(T_P\), since on dimensional grounds that term is a thermal emissivity.
If we take \(\phi = \pi /2\) we get the temperature at the pole: therefore, \(T_P\) is the polar temperature.

As long as \(k_\perp \ll k_\parallel\), we get \(\sigma T^{4} \approx \sigma T_P^{4} \sin^2 \phi \).
We can also define the angle \(\Theta \), between \(\vec{B}\) and the radial direction, through \(\phi = \pi /2 - \Theta \).
% \footnote{This is not a bad approximation --- the correct geometrical considerations yield \(\phi = \arctan(2 / \tan \theta )\), which is only slightly different from \(\phi = \pi / 2 - \Theta \).}

% \todo[inline]{Is this true?}

This then tells us that \(T = T_P \sqrt{\abs{\cos \Theta }}\).

The dipolar magnetic field is given by
%
\begin{align}
\vec{B} = \frac{B_p}{2} \qty(\frac{R}{r})^3 \qty(2 \cos \theta \hat{e}_r + \sin \theta \hat{e}_\theta )
\,,
\end{align}
%
so on the surface it reads
%
\begin{align}
\vec{B}(r = R) = \frac{B_p}{2} \qty(2 \cos \theta \hat{e}_r + \sin \theta \hat{e}_\theta )
\,.
\end{align}

The cosine of \(\Theta \), the angle between the \(\vec{B}\) field and the radial direction, is
%
\begin{align}
\cos \Theta = \frac{\vec{B} \cdot \hat{e}_r}{\abs{\vec{B}}} = \frac{B_r}{B} = \frac{B_p}{2} \frac{2 \cos \theta }{B}
\,,
\end{align}
%
so, substituting the surface field strength \(B = \abs{\vec{B}} = (B_p /2) \sqrt{1 + 3 \cos^2 \theta }\), we get
%
\begin{align}
\cos \Theta = \frac{2 \cos \theta }{\sqrt{1 + 3 \cos^2 \theta }}
\,.
\end{align}

Therefore, the temperature monotonically decreases going from the pole to the equator.
At the equator it is not really zero, but it is an order of magnitude lower than the polar temperature.
A plot of the approximated formula \(T/ T_P \sim \sqrt{\abs{\cos \Theta }}\) is shown in figure \ref{fig:NS-temperature}.
\begin{figure}[H]
\centering
\includegraphics[width=.7\textwidth]{figures/NS-temperature}
\caption{Temperature as a function of \(\theta \). On the left we have the pole, on the right the equator; in reality the temperature will not drop to zero at \(\theta = \pi /2\): there, the conduction along \(x\) becomes relevant.}
\label{fig:NS-temperature}
\end{figure}

This has an important observational implication: the emission from the NS is not a single blackbody, but rather a superposition of blackbodies, one for each latitude.

What we have done so far applies in the dipolar approximation, but nothing prevents higher-order moments from being relevant.
We also need to account for relativistic effects:
\begin{enumerate}
    \item the emitted radiation is redshifted by a factor
    %
    \begin{align}
    \frac{\nu _\infty}{\nu } = \sqrt{1 - \frac{2GM}{c^2R}}
    \,,
    \end{align}
    %
    which typically comes out to \(\Delta \nu / \nu \sim \num{.2}\).
    This is reflected in a lower effective blackbody temperature.
    \item The luminosity at infinity is multiplied by a factor \((1 - 2GM/c^2R)\) compared to the one at the surface, because of the relativistic correction to the radius (\(R = R_\infty \sqrt{1 - 2GM/c^2R}\)), which factors into the surface area.
    \item The light rays' paths will be curved by the gravitational effect of the NS.
    Consider the point of emission of a photon on the surface, and denote as \(\alpha \) the angle between the radial direction at that point and the direction of emission; further, denote as \(\theta \) the angle between the radial direction and our line of sight.
    Then, it is a good approximation to say that the photon reaches us when \cite[]{beloborodovGravitationalBendingLight2002}
    %
    \begin{align}
    1 - \cos \alpha = (1 - \cos \theta ) \qty(1 - \frac{2GM}{c^2R})
    \,.
    \end{align}

    This means that we see more than half of the surface of the NS.
    The terminator corresponds to emission tangent to the surface, \(\alpha = \pi /2\), and therefore lies at
    %
    \begin{align}
    \cos \theta _{\text{terminator}} = - \qty( \frac{Rc^2}{2GM} -1 )^{-1}
    \,,
    \end{align}
    %
    which, for example, yields \SI{112}{\degree} for \(M = \num{1.4} M_{\odot}\) and \(R = \SI{15}{km}\).

    The effect this has is to allow us to continuously see the two magnetic hot spots (at the magnetic poles) even if the magnetic and rotation axes are misaligned.
\end{enumerate}

\section{Accretion onto Neutron Stars}

We will need some characteristic lengths of the accreting system.
We start by introducing the \textbf{light-cylinder radius} \(R_{LC}\).
The star is rotating at angular velocity \(\Omega \), and in its vicinity (up to the distance we define to be \(R_{LC}\)) the magnetic field lines corotate rigidly.
We can calculate this radius by imposing that the motion of the field lines (or rather, of the particles which are forced to move along them) be subluminal: the threshold is found from \(v_\phi = R_{LC} \Omega = c\), so \(R_{LC} = c/ \Omega \).
This is on the order of
%
\begin{align}
R_{LC} = \frac{cP}{2 \pi } \approx \SI{5e9}{cm} \frac{P}{\SI{1}{s}}
\,.
\end{align}

Beyond this radius, the magnetic field lines must disconnect.
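As a quick numerical check of the terminator angle and of \(R_{LC}\) (a sketch in Python, assuming \(M = \num{1.4} M_{\odot}\), \(R = \SI{15}{km}\), \(P = \SI{1}{s}\) and CGS constants):
\begin{verbatim}
# Check of the light-bending terminator angle and the light-cylinder radius
# (CGS units; M = 1.4 Msun, R = 15 km, P = 1 s as quoted in the text).
import numpy as np

G, c, Msun = 6.674e-8, 2.998e10, 1.989e33
M, R, P = 1.4 * Msun, 1.5e6, 1.0

cos_term = -1.0 / (R * c**2 / (2 * G * M) - 1.0)
print(np.degrees(np.arccos(cos_term)))   # ~ 112 degrees
print(c * P / (2 * np.pi))               # R_LC ~ 4.8e9 cm, i.e. ~ 5e9 cm
\end{verbatim}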
The \textbf{Alfvén radius} is the one at which the magnetic pressure equals the ram pressure; we will give a rough estimate by comparing energy densities:\footnote{This is conservative, since we are treating the ram pressure as stronger than it really is: the pressure of the magnetic field is of the same order of magnitude as its energy density (in natural units), while the same can be said for the matter only if it is relativistic.}
%
\begin{align}
\frac{B^2}{8 \pi } = \frac{1}{2} \rho v^2
\,,
\end{align}
%
and we can approximate the radius at which this occurs by using Bondi flow, therefore assuming spherical symmetry.
This is not realistic, but it turns out to give a reasonable answer.
Mass conservation gives us
%
\begin{align}
4 \pi r^2\rho v = \dot{M}
\,,
\end{align}
%
with \(v \approx \sqrt{GM /r}\) (the free-fall velocity, up to a factor of order unity), so that \(\rho = \dot{M} / 4 \pi r^2 v \) and
%
\begin{align}
\frac{B^2}{8 \pi } = \frac{1}{2} \frac{\dot{M} v^2}{4 \pi r^2v} = \frac{1}{2} \frac{\dot{M}}{4 \pi r^2} \sqrt{ \frac{GM}{r}}
\,,
\end{align}
%
and let us consider \(B \approx (B_P /2) (R / r)^3\).
Dropping numerical factors of order unity, this yields
%
\begin{align}
\frac{B_p^2 R^{6}}{\sqrt{GM} \dot{M}} = r^{6} r^{-2} r^{-1/2} = r^{7/2}
\,,
\end{align}
%
therefore
%
\begin{align}
r_A &= \qty(\frac{B_p^2 R^{6}}{\sqrt{GM} \dot{M}})^{2/7}  \\
&\approx \SI{3e8}{cm} \times
\qty(\frac{B_p}{\SI{e12}{G}})^{4/7}
\qty(\frac{R}{\SI{e6}{cm}})^{12/7}
\qty(\frac{M}{M_{\odot}})^{-1/7}
\qty(\frac{\dot{M}}{\SI{e17}{g/s}})^{-2/7}
\,.
\end{align}

Now, since \(B^2 \propto r^{-6}\) while \(\rho v^2 \propto r^{-5/2}\), for radii smaller than \(r_A\) the magnetic pressure dominates.
An interesting fact to note is that for this calculation using only the dipolar field is fully justified, since higher-order multipoles decay even faster as the radius increases.
Typically, \(r_A < R_{LC}\), which justifies the use of the dipolar field in our treatment of \(B\)-dominated accretion.

% Also, note that our reference value of \SI{e8}{G} for \(B_p\) is quite low --- as one can see in figure \ref{fig:p-pdot-diagram}, typical values range from this to much higher fields.
% So, for highly magnetized NSs the Alfvén radii will be larger, potentially more than \(r_{LC}\), and we might need to consider higher order multipoles as well.

\end{document}
{ "alphanum_fraction": 0.6687777778, "avg_line_length": 42.4528301887, "ext": "tex", "hexsha": "ced26a3ec63e321f6a8bf7e79cadd9ffd934cb5e", "lang": "TeX", "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2021-08-06T16:11:07.000Z", "max_forks_repo_forks_event_min_datetime": "2019-10-03T16:20:19.000Z", "max_forks_repo_head_hexsha": "805ebe1be49bbd14c6b46b24055f9fc7d1cd2586", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "jacopok/notes", "max_forks_repo_path": "ap_third_semester/compact_objects/dec09.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "805ebe1be49bbd14c6b46b24055f9fc7d1cd2586", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "jacopok/notes", "max_issues_repo_path": "ap_third_semester/compact_objects/dec09.tex", "max_line_length": 469, "max_stars_count": 6, "max_stars_repo_head_hexsha": "805ebe1be49bbd14c6b46b24055f9fc7d1cd2586", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "jacopok/notes", "max_stars_repo_path": "ap_third_semester/compact_objects/dec09.tex", "max_stars_repo_stars_event_max_datetime": "2022-01-13T14:52:50.000Z", "max_stars_repo_stars_event_min_datetime": "2019-10-10T13:10:57.000Z", "num_tokens": 2944, "size": 9000 }
\textbf{Write a function that solves Burgers equation
\begin{align*}
&u_t+uu_x=\varepsilon u_{xx},~~~~~x\in(0,1),~~t\in(0,t_{max}]\\
&u(0,x)=\sin^4(\pi x)
\end{align*}
and periodic boundary conditions. Use Fourier to compute derivatives in space and ode113 to advance in time. Solve this PDE for $\varepsilon= 0.1,0.01,$ and $0.001$. In each case, can you find solutions that are accurate to three digits at $t= 1$?}
\newline

Since the domain is $1$-periodic, in order to use the Fourier DFT comfortably we rescale the space variable $x$ so that the PDE and the boundary conditions become $2\pi$-periodic. The convenient change of variable is $y = 2\pi x$, so that $y$ is $2\pi$-periodic. Since $\partial_x = 2\pi\,\partial_y$ and $\partial_{xx} = 4\pi^2\,\partial_{yy}$, the PDE becomes
\begin{align*}
&u_t+2\pi uu_y=4\pi^2\varepsilon u_{yy},~~~~~y\in(0,2\pi),~~t\in(0,t_{max}]\\
&u(0,y)=\sin^4(y/2).
\end{align*}
We can easily solve this PDE using Fourier differentiation in space and ode113 in time; see the code below. The errors reported here are estimated in the infinity norm by comparing the solution on a grid with $N$ points against the solution on a grid with $2N$ points (at the shared grid points):
\begin{center}
\begin{tabular}{||c c c||}
\hline
$\varepsilon$ & $N$ & Error \\ [0.5ex]
\hline\hline
$0.1$ & $64$ & $0.4192\cdot 10^{-3}$ \\
\hline
$0.01$ & $64$ & $0.4839\cdot 10^{-3}$ \\
\hline
$0.001$ & $512$ & $0.9414\cdot 10^{-3}$ \\
\hline
\end{tabular}
\end{center}
We can see that the smaller $\varepsilon$ is (and therefore the weaker the diffusion), the more grid points are needed to obtain a solution accurate to three digits.

\subsection*{Matlab code for this problem}
\begin{verbatim}
%% Problem 3
% Grid-refinement study: for each epsilon, keep doubling N until the
% solutions on successive grids agree to within tol at t = tmax.
tmax = 1;
eps  = [1e-1 1e-2 1e-3]';
tol  = 1e-3;
mesh = zeros(length(eps),1);
E    = zeros(length(eps),1);
for m=1:length(eps)
  j = 5;
  N = 2^j;
  error = 1;
  u = PDE_solve(N,tmax,eps(m),false);
  while (error>tol && N<2048)
    j = j+1;
    N = 2^j;
    ufine = PDE_solve(N,tmax,eps(m),false);
    % compare at the shared grid points (every other fine-grid point)
    error = norm(u(end,:)-ufine(end,2:2:end),inf);
    u = ufine;
    j
  end
  m
  mesh(m) = N;
  E(m) = error;
end
mesh
E

function [u,x]=PDE_solve(N,tmax,eps,movie)
  % Solve u_t + 2*pi*u*u_y = 4*pi^2*eps*u_yy on (0,2*pi) with N grid points,
  % periodic boundary conditions, and initial condition u(0,y) = sin(y/2)^4.
  x0 = 0;
  xf = 2*pi;
  h = (xf-x0)/N;
  x = x0 + h*(1:N)';
  u0 = sin(x/2).^4;
  tic;
  [t,u] = ode113(@(t,u) rhs(u,N,eps), [0 tmax], u0);
  toc
  if movie
    figure
    for k = 1:2:length(t)
      plot(x,u(k,:));
      axis([x0 xf -.2 1.2]);
      drawnow
    end
  end
end

function y = rhs(u,N,eps)
  % Spectral derivatives: the Nyquist mode is zeroed for the first
  % derivative and kept (with -k^2) for the second derivative.
  vhat  = fft(u);
  what  = 1i*[0:N/2-1 0 -N/2+1:-1]' .* vhat;
  what2 = -([0:N/2-1 N/2 -N/2+1:-1].^2)' .* vhat;
  ux  = real(ifft(what));
  uxx = real(ifft(what2));
  y = 4*pi^2*eps*uxx-2*pi*u.*ux;
end
\end{verbatim}
{ "alphanum_fraction": 0.5996822875, "avg_line_length": 28.2921348315, "ext": "tex", "hexsha": "7d71991a0b54cd050a028af90a3123ae38f120b9", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "12ab3e86a4a44270877e09715eeab713da45519d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "fjcasti1/Courses", "max_forks_repo_path": "SpectralMethods/Homework2/Latex/problem3.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "12ab3e86a4a44270877e09715eeab713da45519d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "fjcasti1/Courses", "max_issues_repo_path": "SpectralMethods/Homework2/Latex/problem3.tex", "max_line_length": 287, "max_stars_count": null, "max_stars_repo_head_hexsha": "12ab3e86a4a44270877e09715eeab713da45519d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "fjcasti1/Courses", "max_stars_repo_path": "SpectralMethods/Homework2/Latex/problem3.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 976, "size": 2518 }
\documentclass[11pt]{article} \usepackage[breakable]{tcolorbox} \usepackage{parskip} % Stop auto-indenting (to mimic markdown behaviour) \usepackage{iftex} \ifPDFTeX \usepackage[T1]{fontenc} \usepackage{mathpazo} \else \usepackage{fontspec} \fi % Basic figure setup, for now with no caption control since it's done % automatically by Pandoc (which extracts ![](path) syntax from Markdown). \usepackage{graphicx} % Maintain compatibility with old templates. Remove in nbconvert 6.0 \let\Oldincludegraphics\includegraphics % Ensure that by default, figures have no caption (until we provide a % proper Figure object with a Caption API and a way to capture that % in the conversion process - todo). \usepackage{caption} \DeclareCaptionFormat{nocaption}{} \captionsetup{format=nocaption,aboveskip=0pt,belowskip=0pt} \usepackage[Export]{adjustbox} % Used to constrain images to a maximum size \adjustboxset{max size={0.9\linewidth}{0.9\paperheight}} \usepackage{float} \floatplacement{figure}{H} % forces figures to be placed at the correct location \usepackage{xcolor} % Allow colors to be defined \usepackage{enumerate} % Needed for markdown enumerations to work \usepackage{geometry} % Used to adjust the document margins \usepackage{amsmath} % Equations \usepackage{amssymb} % Equations \usepackage{textcomp} % defines textquotesingle % Hack from http://tex.stackexchange.com/a/47451/13684: \AtBeginDocument{% \def\PYZsq{\textquotesingle}% Upright quotes in Pygmentized code } \usepackage{upquote} % Upright quotes for verbatim code \usepackage{eurosym} % defines \euro \usepackage[mathletters]{ucs} % Extended unicode (utf-8) support \usepackage{fancyvrb} % verbatim replacement that allows latex \usepackage{grffile} % extends the file name processing of package graphics % to support a larger range \makeatletter % fix for grffile with XeLaTeX \def\Gread@@xetex#1{% \IfFileExists{"\Gin@base".bb}% {\Gread@eps{\[email protected]}}% {\Gread@@xetex@aux#1}% } \makeatother % The hyperref package gives us a pdf with properly built % internal navigation ('pdf bookmarks' for the table of contents, % internal cross-reference links, web links for URLs, etc.) \usepackage{hyperref} % The default LaTeX title has an obnoxious amount of whitespace. By default, % titling removes some of it. It also provides customization options. 
\usepackage{titling} \usepackage{longtable} % longtable support required by pandoc >1.10 \usepackage{booktabs} % table support for pandoc > 1.12.2 \usepackage[inline]{enumitem} % IRkernel/repr support (it uses the enumerate* environment) \usepackage[normalem]{ulem} % ulem is needed to support strikethroughs (\sout) % normalem makes italics be italics, not underlines \usepackage{mathrsfs} % Colors for the hyperref package \definecolor{urlcolor}{rgb}{0,.145,.698} \definecolor{linkcolor}{rgb}{.71,0.21,0.01} \definecolor{citecolor}{rgb}{.12,.54,.11} % ANSI colors \definecolor{ansi-black}{HTML}{3E424D} \definecolor{ansi-black-intense}{HTML}{282C36} \definecolor{ansi-red}{HTML}{E75C58} \definecolor{ansi-red-intense}{HTML}{B22B31} \definecolor{ansi-green}{HTML}{00A250} \definecolor{ansi-green-intense}{HTML}{007427} \definecolor{ansi-yellow}{HTML}{DDB62B} \definecolor{ansi-yellow-intense}{HTML}{B27D12} \definecolor{ansi-blue}{HTML}{208FFB} \definecolor{ansi-blue-intense}{HTML}{0065CA} \definecolor{ansi-magenta}{HTML}{D160C4} \definecolor{ansi-magenta-intense}{HTML}{A03196} \definecolor{ansi-cyan}{HTML}{60C6C8} \definecolor{ansi-cyan-intense}{HTML}{258F8F} \definecolor{ansi-white}{HTML}{C5C1B4} \definecolor{ansi-white-intense}{HTML}{A1A6B2} \definecolor{ansi-default-inverse-fg}{HTML}{FFFFFF} \definecolor{ansi-default-inverse-bg}{HTML}{000000} % commands and environments needed by pandoc snippets % extracted from the output of `pandoc -s` \providecommand{\tightlist}{% \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} \DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}} % Add ',fontsize=\small' for more characters per line \newenvironment{Shaded}{}{} \newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}} \newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.56,0.13,0.00}{{#1}}} \newcommand{\DecValTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}} \newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}} \newcommand{\FloatTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}} \newcommand{\CharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}} \newcommand{\StringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}} \newcommand{\CommentTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textit{{#1}}}} \newcommand{\OtherTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{{#1}}} \newcommand{\AlertTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}} \newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.02,0.16,0.49}{{#1}}} \newcommand{\RegionMarkerTok}[1]{{#1}} \newcommand{\ErrorTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}} \newcommand{\NormalTok}[1]{{#1}} % Additional commands for more recent versions of Pandoc \newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.53,0.00,0.00}{{#1}}} \newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}} \newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}} \newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.73,0.40,0.53}{{#1}}} \newcommand{\ImportTok}[1]{{#1}} \newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.73,0.13,0.13}{\textit{{#1}}}} \newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}} \newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}} \newcommand{\VariableTok}[1]{\textcolor[rgb]{0.10,0.09,0.49}{{#1}}} \newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}} \newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.40,0.40,0.40}{{#1}}} \newcommand{\BuiltInTok}[1]{{#1}} \newcommand{\ExtensionTok}[1]{{#1}} 
\newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.74,0.48,0.00}{{#1}}} \newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.49,0.56,0.16}{{#1}}} \newcommand{\InformationTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}} \newcommand{\WarningTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}} % Define a nice break command that doesn't care if a line doesn't already % exist. \def\br{\hspace*{\fill} \\* } % Math Jax compatibility definitions \def\gt{>} \def\lt{<} \let\Oldtex\TeX \let\Oldlatex\LaTeX \renewcommand{\TeX}{\textrm{\Oldtex}} \renewcommand{\LaTeX}{\textrm{\Oldlatex}} % Document parameters % Document title \title{Acoustics} % Pygments definitions \makeatletter \def\PY@reset{\let\PY@it=\relax \let\PY@bf=\relax% \let\PY@ul=\relax \let\PY@tc=\relax% \let\PY@bc=\relax \let\PY@ff=\relax} \def\PY@tok#1{\csname PY@tok@#1\endcsname} \def\PY@toks#1+{\ifx\relax#1\empty\else% \PY@tok{#1}\expandafter\PY@toks\fi} \def\PY@do#1{\PY@bc{\PY@tc{\PY@ul{% \PY@it{\PY@bf{\PY@ff{#1}}}}}}} \def\PY#1#2{\PY@reset\PY@toks#1+\relax+\PY@do{#2}} \expandafter\def\csname PY@tok@w\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.73,0.73}{##1}}} \expandafter\def\csname PY@tok@c\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}} \expandafter\def\csname PY@tok@cp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.74,0.48,0.00}{##1}}} \expandafter\def\csname PY@tok@k\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@kp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@kt\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.69,0.00,0.25}{##1}}} \expandafter\def\csname PY@tok@o\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@ow\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}} \expandafter\def\csname PY@tok@nb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@nf\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}} \expandafter\def\csname PY@tok@nc\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}} \expandafter\def\csname PY@tok@nn\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}} \expandafter\def\csname PY@tok@ne\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.82,0.25,0.23}{##1}}} \expandafter\def\csname PY@tok@nv\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}} \expandafter\def\csname PY@tok@no\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.53,0.00,0.00}{##1}}} \expandafter\def\csname PY@tok@nl\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.63,0.63,0.00}{##1}}} \expandafter\def\csname PY@tok@ni\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.60,0.60,0.60}{##1}}} \expandafter\def\csname PY@tok@na\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.49,0.56,0.16}{##1}}} \expandafter\def\csname PY@tok@nt\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@nd\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}} \expandafter\def\csname PY@tok@s\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@sd\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@si\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.53}{##1}}} \expandafter\def\csname 
PY@tok@se\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.13}{##1}}} \expandafter\def\csname PY@tok@sr\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.53}{##1}}} \expandafter\def\csname PY@tok@ss\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}} \expandafter\def\csname PY@tok@sx\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@m\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@gh\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}} \expandafter\def\csname PY@tok@gu\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.50,0.00,0.50}{##1}}} \expandafter\def\csname PY@tok@gd\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.63,0.00,0.00}{##1}}} \expandafter\def\csname PY@tok@gi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.63,0.00}{##1}}} \expandafter\def\csname PY@tok@gr\endcsname{\def\PY@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}} \expandafter\def\csname PY@tok@ge\endcsname{\let\PY@it=\textit} \expandafter\def\csname PY@tok@gs\endcsname{\let\PY@bf=\textbf} \expandafter\def\csname PY@tok@gp\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}} \expandafter\def\csname PY@tok@go\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}} \expandafter\def\csname PY@tok@gt\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.27,0.87}{##1}}} \expandafter\def\csname PY@tok@err\endcsname{\def\PY@bc##1{\setlength{\fboxsep}{0pt}\fcolorbox[rgb]{1.00,0.00,0.00}{1,1,1}{\strut ##1}}} \expandafter\def\csname PY@tok@kc\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@kd\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@kn\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@kr\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@bp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@fm\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}} \expandafter\def\csname PY@tok@vc\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}} \expandafter\def\csname PY@tok@vg\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}} \expandafter\def\csname PY@tok@vi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}} \expandafter\def\csname PY@tok@vm\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}} \expandafter\def\csname PY@tok@sa\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@sb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@sc\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@dl\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@s2\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@sh\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@s1\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@mb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@mf\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname 
PY@tok@mh\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@mi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@il\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@mo\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@ch\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}} \expandafter\def\csname PY@tok@cm\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}} \expandafter\def\csname PY@tok@cpf\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}} \expandafter\def\csname PY@tok@c1\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}} \expandafter\def\csname PY@tok@cs\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}} \def\PYZbs{\char`\\} \def\PYZus{\char`\_} \def\PYZob{\char`\{} \def\PYZcb{\char`\}} \def\PYZca{\char`\^} \def\PYZam{\char`\&} \def\PYZlt{\char`\<} \def\PYZgt{\char`\>} \def\PYZsh{\char`\#} \def\PYZpc{\char`\%} \def\PYZdl{\char`\$} \def\PYZhy{\char`\-} \def\PYZsq{\char`\'} \def\PYZdq{\char`\"} \def\PYZti{\char`\~} % for compatibility with earlier versions \def\PYZat{@} \def\PYZlb{[} \def\PYZrb{]} \makeatother % For linebreaks inside Verbatim environment from package fancyvrb. \makeatletter \newbox\Wrappedcontinuationbox \newbox\Wrappedvisiblespacebox \newcommand*\Wrappedvisiblespace {\textcolor{red}{\textvisiblespace}} \newcommand*\Wrappedcontinuationsymbol {\textcolor{red}{\llap{\tiny$\m@th\hookrightarrow$}}} \newcommand*\Wrappedcontinuationindent {3ex } \newcommand*\Wrappedafterbreak {\kern\Wrappedcontinuationindent\copy\Wrappedcontinuationbox} % Take advantage of the already applied Pygments mark-up to insert % potential linebreaks for TeX processing. % {, <, #, %, $, ' and ": go to next line. % _, }, ^, &, >, - and ~: stay at end of broken line. % Use of \textquotesingle for straight quote. \newcommand*\Wrappedbreaksatspecials {% \def\PYGZus{\discretionary{\char`\_}{\Wrappedafterbreak}{\char`\_}}% \def\PYGZob{\discretionary{}{\Wrappedafterbreak\char`\{}{\char`\{}}% \def\PYGZcb{\discretionary{\char`\}}{\Wrappedafterbreak}{\char`\}}}% \def\PYGZca{\discretionary{\char`\^}{\Wrappedafterbreak}{\char`\^}}% \def\PYGZam{\discretionary{\char`\&}{\Wrappedafterbreak}{\char`\&}}% \def\PYGZlt{\discretionary{}{\Wrappedafterbreak\char`\<}{\char`\<}}% \def\PYGZgt{\discretionary{\char`\>}{\Wrappedafterbreak}{\char`\>}}% \def\PYGZsh{\discretionary{}{\Wrappedafterbreak\char`\#}{\char`\#}}% \def\PYGZpc{\discretionary{}{\Wrappedafterbreak\char`\%}{\char`\%}}% \def\PYGZdl{\discretionary{}{\Wrappedafterbreak\char`\$}{\char`\$}}% \def\PYGZhy{\discretionary{\char`\-}{\Wrappedafterbreak}{\char`\-}}% \def\PYGZsq{\discretionary{}{\Wrappedafterbreak\textquotesingle}{\textquotesingle}}% \def\PYGZdq{\discretionary{}{\Wrappedafterbreak\char`\"}{\char`\"}}% \def\PYGZti{\discretionary{\char`\~}{\Wrappedafterbreak}{\char`\~}}% } % Some characters . , ; ? ! / are not pygmentized. 
% This macro makes them "active" and they will insert potential linebreaks \newcommand*\Wrappedbreaksatpunct {% \lccode`\~`\.\lowercase{\def~}{\discretionary{\hbox{\char`\.}}{\Wrappedafterbreak}{\hbox{\char`\.}}}% \lccode`\~`\,\lowercase{\def~}{\discretionary{\hbox{\char`\,}}{\Wrappedafterbreak}{\hbox{\char`\,}}}% \lccode`\~`\;\lowercase{\def~}{\discretionary{\hbox{\char`\;}}{\Wrappedafterbreak}{\hbox{\char`\;}}}% \lccode`\~`\:\lowercase{\def~}{\discretionary{\hbox{\char`\:}}{\Wrappedafterbreak}{\hbox{\char`\:}}}% \lccode`\~`\?\lowercase{\def~}{\discretionary{\hbox{\char`\?}}{\Wrappedafterbreak}{\hbox{\char`\?}}}% \lccode`\~`\!\lowercase{\def~}{\discretionary{\hbox{\char`\!}}{\Wrappedafterbreak}{\hbox{\char`\!}}}% \lccode`\~`\/\lowercase{\def~}{\discretionary{\hbox{\char`\/}}{\Wrappedafterbreak}{\hbox{\char`\/}}}% \catcode`\.\active \catcode`\,\active \catcode`\;\active \catcode`\:\active \catcode`\?\active \catcode`\!\active \catcode`\/\active \lccode`\~`\~ } \makeatother \let\OriginalVerbatim=\Verbatim \makeatletter \renewcommand{\Verbatim}[1][1]{% %\parskip\z@skip \sbox\Wrappedcontinuationbox {\Wrappedcontinuationsymbol}% \sbox\Wrappedvisiblespacebox {\FV@SetupFont\Wrappedvisiblespace}% \def\FancyVerbFormatLine ##1{\hsize\linewidth \vtop{\raggedright\hyphenpenalty\z@\exhyphenpenalty\z@ \doublehyphendemerits\z@\finalhyphendemerits\z@ \strut ##1\strut}% }% % If the linebreak is at a space, the latter will be displayed as visible % space at end of first line, and a continuation symbol starts next line. % Stretch/shrink are however usually zero for typewriter font. \def\FV@Space {% \nobreak\hskip\z@ plus\fontdimen3\font minus\fontdimen4\font \discretionary{\copy\Wrappedvisiblespacebox}{\Wrappedafterbreak} {\kern\fontdimen2\font}% }% % Allow breaks at special characters using \PYG... macros. \Wrappedbreaksatspecials % Breaks at punctuation characters . , ; ? ! 
and / need catcode=\active \OriginalVerbatim[#1,codes*=\Wrappedbreaksatpunct]% } \makeatother % Exact colors from NB \definecolor{incolor}{HTML}{303F9F} \definecolor{outcolor}{HTML}{D84315} \definecolor{cellborder}{HTML}{CFCFCF} \definecolor{cellbackground}{HTML}{F7F7F7} % prompt \makeatletter \newcommand{\boxspacing}{\kern\kvtcb@left@rule\kern\kvtcb@boxsep} \makeatother \newcommand{\prompt}[4]{ \ttfamily\llap{{\color{#2}[#3]:\hspace{3pt}#4}}\vspace{-\baselineskip} } % Prevent overflowing lines due to hard-to-break entities \sloppy % Setup hyperref package \hypersetup{ breaklinks=true, % so long urls are correctly broken across lines colorlinks=true, urlcolor=urlcolor, linkcolor=linkcolor, citecolor=citecolor, } % Slightly bigger margins than the latex defaults \geometry{verbose,tmargin=1in,bmargin=1in,lmargin=1in,rmargin=1in} \begin{document} \maketitle \hypertarget{acoustics}{% \section{Acoustics}\label{acoustics}} \begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder] \prompt{In}{incolor}{1}{\boxspacing} \begin{Verbatim}[commandchars=\\\{\}] \PY{o}{\PYZpc{}}\PY{k}{matplotlib} inline \end{Verbatim} \end{tcolorbox} \begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder] \prompt{In}{incolor}{2}{\boxspacing} \begin{Verbatim}[commandchars=\\\{\}] \PY{o}{\PYZpc{}}\PY{k}{config} InlineBackend.figure\PYZus{}format = \PYZsq{}svg\PYZsq{} \PY{k+kn}{import} \PY{n+nn}{numpy} \PY{k}{as} \PY{n+nn}{np} \PY{k+kn}{from} \PY{n+nn}{exact\PYZus{}solvers} \PY{k}{import} \PY{n}{acoustics}\PY{p}{,} \PY{n}{acoustics\PYZus{}demos} \PY{k+kn}{from} \PY{n+nn}{IPython}\PY{n+nn}{.}\PY{n+nn}{display} \PY{k}{import} \PY{n}{IFrame}\PY{p}{,} \PY{n}{HTML}\PY{p}{,} \PY{n}{Image} \end{Verbatim} \end{tcolorbox} In this chapter we consider our first \emph{system} of hyperbolic conservation laws. We study the acoustics equations that were introduced briefly in \href{Introduction.ipynb}{Introduction}. We first describe the physical context of this system and then investigate its characteristic structure and the solution to the Riemann problem. This system is described in more detail in Chapter 3 of \cite{fvmhp}. If you wish to examine the Python code for this chapter, please see: \begin{itemize} \tightlist \item \url{exact_solvers/acoustics.py} \ldots{} \href{https://github.com/clawpack/riemann_book/blob/FA16/exact_solvers/acoustics.py}{on github,} \item \url{exact_solvers/acoustics_demos.py} \ldots{} \href{https://github.com/clawpack/riemann_book/blob/FA16/exact_solvers/acoustics_demos.py}{on github.} \end{itemize} \hypertarget{physical-setting}{% \subsection{Physical setting}\label{physical-setting}} The linear acoustic equations describe the propagation of small perturbations in a fluid. In \href{Advection.ipynb}{Advection} we derived the one-dimensional continuity equation, which describes mass conservation:\\ \begin{align} \label{Ac:continuity} \rho_t + (\rho u)_x & = 0. \end{align}\\ For more realistic fluid models, we need another equation that determines the velocity \(u\). This typically takes the form of a conservation law for the momentum \(\rho u\). Momentum, like density, is transported by fluid motion with corresponding flux \(\rho u^2\). Additionally, any difference in pressure will also lead to a flux of momentum that is proportional to the pressure difference. 
Thus the momentum equation takes the form\\ \begin{align} \label{Ac:mom_cons} (\rho u)_t + (\rho u^2 + P(\rho))_x & = 0, \end{align}\\ where the pressure \(P\) is given by the equation of state \(P(\rho)\); here we have assumed the pressure depends only on the density. A more general equation of state will be considered, along with fully nonlinear fluid motions, in \href{Euler.ipynb}{Euler}. The linear acoustics equations focus on the behavior of small perturbations in the system above. In order to derive the equations of linear acoustics, observe that equations (\ref{Ac:continuity})-(\ref{Ac:mom_cons}) form a hyperbolic system \(q_t+f(q)_x=0\) with\\ \begin{align*} q & = \begin{bmatrix} \rho \\ \rho u \end{bmatrix} & f(q) & = \begin{bmatrix} \rho u \\ \rho u^2 + P(\rho) \end{bmatrix} \end{align*}\\ We will make use of the quasilinear form of a hyperbolic system: \[q_t + f'(q) q_x = 0.\]\\ Here \(f'(q)\) denotes the Jacobian of the flux \(f\) with respect to the conserved variables \(q\). In the present system, as is often the case, \(f\) is most naturally written in terms of so-called primitive variables (in this case \(\rho\) and \(u\)) rather than in terms of the conserved variables \(q\). In order to find the flux Jacobian (and thus the quasilinear form), we first write \(f\) in terms of the conserved variables \((q_1,q_2) = (\rho, \rho u)\):\\ \begin{align} f(q) & = \begin{bmatrix} q_2 \\ q_2^2/q_1 + P(q_1) \end{bmatrix}. \end{align} Now we can differentiate to find the flux Jacobian:\\ \begin{align*} f'(q) & = \begin{bmatrix} \partial f_1/\partial q_1 & \partial f_1/\partial q_2 \\ \partial f_2/\partial q_1 & \partial f_2/\partial q_2 \end{bmatrix} \\ & = \begin{bmatrix} 0 & 1 \\ -q_2^2/q_1^2 + P'(q_1) & 2q_2/q_1 \end{bmatrix} \\ & = \begin{bmatrix} 0 & 1 \\ P'(\rho)-u^2 & 2u \end{bmatrix}. \end{align*} Thus small perturbations to an ambient fluid state \(\rho_0, u_0\) evolve according to the linearized equations \(q_t + f'(q_0) q_x = 0\), or more explicitly \begin{align*} \rho_t + (\rho u)_x & = 0 \\ (\rho u)_t + (P'(\rho_0)-u_0^2)\rho_x + 2u_0(\rho u)_x & = 0. \end{align*}\\ As we are only interested in small perturbations of equation (\ref{Ac:mom_cons}), we expand the perturbations \(\rho-\rho_0\) and \(\rho u - \rho_0 u_0\) as functions of a small parameter \(\epsilon\), and then we discard terms of order \(\epsilon^2\) and higher. This results in the linear hyperbolic system\\ \begin{align*} p_t + u_0 p_x + P'(\rho_0) u_x & = 0 \\ u_t + \frac{1}{\rho_0} p_x + u_0 u_x & = 0, \end{align*} where \(p(x,t)\) is the pressure as a function of \(x\) and \(t\). If the ambient fluid is at rest (i.e.~\(u_0=0\)) and the pressure is directly proportional to the density, then this simplifies to \begin{align} \label{Ac:main} \left[ \begin{array}{c} p \\ u \end{array} \right]_t + \underbrace{\left[ \begin{array}{cc} 0 & K_0 \\ 1/\rho_0 & 0 \\ \end{array} \right]}_{\mathbf{A}} \left[ \begin{array}{c} p \\ u \end{array} \right]_x = 0, \end{align} where \(K_0=P'(\rho_0)\) is referred to as the bulk modulus of compressibility. The system of equations (\ref{Ac:main}) is called the linear acoustics equations. For the rest of this chapter we work with (\ref{Ac:main}) and let \(q=[p,u]^T\). Then we can write (\ref{Ac:main}) as \(q_t + A q_x = 0\). For simplicity, we also drop the subscripts on \(K, \rho\).
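As a quick cross-check of the flux Jacobian computed above, one can differentiate symbolically. The short sketch below is not part of the book's code; it assumes SymPy is available and uses \(q_1=\rho\), \(q_2=\rho u\) as above.
\begin{verbatim}
import sympy as sp

q1, q2 = sp.symbols('q1 q2', positive=True)   # q1 = rho, q2 = rho*u
P = sp.Function('P')                          # equation of state P(rho)

f = sp.Matrix([q2, q2**2 / q1 + P(q1)])       # flux in conserved variables
fprime = f.jacobian([q1, q2])
print(fprime)
# should reproduce [[0, 1], [-q2**2/q1**2 + P'(q1), 2*q2/q1]]
\end{verbatim}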
Direct calculation reveals that the eigenvalues of \(A\) are \begin{align} \lambda_1 = -c, \qquad \lambda_2 = c \end{align} where \(c=\sqrt{{K}/{\rho}}\) is the speed of sound in a medium with a given density and bulk modulus. The right eigenvectors of \(A\) are given by \begin{align*} r_1 = \begin{bmatrix}\begin{array}{c}-Z\\1\end{array}\end{bmatrix}, \qquad r_2 = \begin{bmatrix}\begin{array}{c}Z\\1\end{array}\end{bmatrix}, \end{align*} where \(Z=\rho c\) is called the acoustic impedance. Defining \(R = [r_1 r_2]\) and \(\Lambda = \textrm{diag}(\lambda_1, \lambda_2)\), we have \(AR = R\Lambda\), or \(A = R \Lambda R^{-1}\). Substituting this into (\ref{Ac:main}) yields \begin{align*} q_t + A q_x & = 0 \\ q_t + R \Lambda R^{-1} q_x & = 0 \\ R^{-1}q_t + \Lambda R^{-1} q_x & = 0 \\ w_t + \Lambda w_x & = 0, \end{align*} where we have introduced the \emph{characteristic variables} \(w=R^{-1}q\). The last system above is simply a pair of decoupled advection equations for \(w_1\) and \(w_2\), with velocities \(\lambda_1\) and \(\lambda_2\); a system we already know how to solve. Thus we see that the eigenvalues of \(A\) are the velocities at which information propagates in the solution.
\hypertarget{solution-by-characteristics}{%
\subsection{Solution by characteristics}\label{solution-by-characteristics}}
The discussion above suggests a strategy for solving the Cauchy problem:
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item Decompose the initial data \((p(x,0), u(x,0))\) into characteristic variables \(w(x,0)=(w_1^0(x),w_2^0(x))\) using the relation \(w = R^{-1}q\).
\item Evolve the characteristic variables: \(w_p(x,t) = w_p^0(x-\lambda_p t)\).
\item Transform back to the physical variables: \(q = Rw\).
\end{enumerate}
The first step in this process amounts to expressing the vector \(q\) in the basis given by \(r_1, r_2\). Solving the system \(Rw=q\) yields \begin{align*} q = w_1 r_1 + w_2 r_2, \end{align*} where \begin{align*} w_1 = \frac{- p + Z u}{2Z}, \ \ \ \ \ \ w_2 = \frac{ p + Z u}{2Z}. \end{align*} We visualize this below, where the first plot shows the two eigenvectors, and the second plot shows how \(q\) can be expressed as a linear combination of the two eigenvectors, \(r_1\) and \(r_2\).
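The eigen-decomposition and the characteristic decomposition are easy to verify numerically. The following minimal sketch is independent of the \texttt{acoustics\_demos} module; the material parameters and the state \((p,u)\) are arbitrary values chosen only for illustration.
\begin{verbatim}
import numpy as np

rho, K = 1.0, 4.0                  # illustrative density and bulk modulus
c = np.sqrt(K / rho)               # sound speed
Z = rho * c                        # acoustic impedance

A = np.array([[0., K], [1. / rho, 0.]])
R = np.array([[-Z, Z], [1., 1.]])            # columns are r_1, r_2
Lam = np.diag([-c, c])
assert np.allclose(A, R @ Lam @ np.linalg.inv(R))    # A = R Lambda R^{-1}

p, u = 1.0, 0.3                    # an arbitrary state q = (p, u)
w1 = (-p + Z * u) / (2 * Z)
w2 = ( p + Z * u) / (2 * Z)
assert np.allclose(R @ np.array([w1, w2]), [p, u])   # q = w1*r1 + w2*r2
\end{verbatim}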
\emph{In the live notebook you can adjust the left and right states or the material parameters to see how this affects the construction of the Riemann solution.} \begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder] \prompt{In}{incolor}{3}{\boxspacing} \begin{Verbatim}[commandchars=\\\{\}] \PY{o}{\PYZpc{}}\PY{k}{matplotlib} inline \end{Verbatim} \end{tcolorbox} \begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder] \prompt{In}{incolor}{4}{\boxspacing} \begin{Verbatim}[commandchars=\\\{\}] \PY{o}{\PYZpc{}}\PY{k}{config} InlineBackend.figure\PYZus{}format = \PYZsq{}svg\PYZsq{} \PY{k+kn}{import} \PY{n+nn}{numpy} \PY{k}{as} \PY{n+nn}{np} \PY{k+kn}{from} \PY{n+nn}{exact\PYZus{}solvers} \PY{k}{import} \PY{n}{acoustics}\PY{p}{,} \PY{n}{acoustics\PYZus{}demos} \PY{k+kn}{from} \PY{n+nn}{IPython}\PY{n+nn}{.}\PY{n+nn}{display} \PY{k}{import} \PY{n}{IFrame} \end{Verbatim} \end{tcolorbox} \begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder] \prompt{In}{incolor}{5}{\boxspacing} \begin{Verbatim}[commandchars=\\\{\}] \PY{n}{acoustics\PYZus{}demos}\PY{o}{.}\PY{n}{decompose\PYZus{}q\PYZus{}interactive}\PY{p}{(}\PY{p}{)} \end{Verbatim} \end{tcolorbox} \begin{verbatim} interactive(children=(FloatSlider(value=1.0, description='p', max=1.0, min=-1.0), FloatSlider(value=0.3, descr… \end{verbatim} \begin{verbatim} VBox(children=(HBox(children=(FloatSlider(value=1.0, description='p', max=1.0, min=-1.0), FloatSlider(value=1.… \end{verbatim} \begin{verbatim} Output() \end{verbatim} In the second and third steps, we evolve the characteristic variables \(w\) and then transform back to the original variables. We take as initial pressure a Gaussian, with zero initial velocity. We visualize this below, where the time evolution in the characteristic variables is shown in the first plot, and the time evolution of the velocity is shown in the second plot. \begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder] \prompt{In}{incolor}{6}{\boxspacing} \begin{Verbatim}[commandchars=\\\{\}] \PY{n}{acoustics\PYZus{}demos}\PY{o}{.}\PY{n}{char\PYZus{}solution\PYZus{}interactive}\PY{p}{(}\PY{p}{)} \end{Verbatim} \end{tcolorbox} \begin{verbatim} interactive(children=(FloatSlider(value=0.0, description='t', max=1.2), FloatSlider(value=1.0, description='K'… \end{verbatim} \begin{verbatim} HBox(children=(VBox(children=(FloatSlider(value=0.0, description='t', max=1.2),)), VBox(children=(FloatSlider(… \end{verbatim} \begin{verbatim} Output() \end{verbatim} \emph{In the live notebook, you can advance the above solutions in time and select which of the two characteristic variables to display.} Notice how in the characteristic variables \(w\) (plotted on the left), each part of the solution simply advects (translates) since each of the characteristics variables simply obeys an uncoupled advection equation. \hypertarget{the-riemann-problem}{% \subsection{The Riemann problem}\label{the-riemann-problem}} Now that we know how to solve the Cauchy problem, solution of the Riemann problem is merely a special case. We have the special initial data\\ \begin{align*} q(x,0) = \begin{cases} q_\ell & \text{if } x \le 0, \\ q_r & \text{if } x > 0. \end{cases} \end{align*}\\ We can proceed as before, by decomposing into characteristic components, advecting, and then transforming back. 
But since we know the solution will be constant almost everywhere, it's even simpler to just decompose the jump \(\Delta q = q_r - q_\ell\) in terms of the characteristic variables, and advect the two resulting jumps \(\Delta w_1\) and \(\Delta w_2\):\\ \begin{align*} \Delta q = \Delta w_1 r_1 + \Delta w_2 r_2, \end{align*}\\ Since \(R\Delta w = \Delta q\), we have\\ \begin{align*} \Delta w_1 = \frac{-\Delta p + Z\Delta u}{2Z}, \ \ \ \ \ \ \Delta w_2 = \frac{\Delta p + Z\Delta u}{2Z}. \end{align*}\\ Thus the solution has the structure depicted below. \begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder] \prompt{In}{incolor}{7}{\boxspacing} \begin{Verbatim}[commandchars=\\\{\}] \PY{n}{Image}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{figures/acoustics\PYZus{}xt\PYZus{}plane.png}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{n}{width}\PY{o}{=}\PY{l+m+mi}{350}\PY{p}{)} \end{Verbatim} \end{tcolorbox} \prompt{Out}{outcolor}{7}{} \begin{center} \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{output_18_0.png} \end{center} { \hspace*{\fill} \\} The three constant states are related by the jumps:\\ \begin{align} q_m = q_\ell + \Delta w_1 r_1 = q_r - \Delta w_2 r_2. \label{eq:acussol} \end{align}\\ The jumps in pressure and velocity for each propagating discontinuity are related in a particular way, since each jump is a multiple of one of the eigenvectors of \(A\). More generally, the eigenvectors of the coefficient matrix of a linear hyperbolic system reveal the relation between jumps in the conserved variables across a wave propagating with speed given by the corresponding eigenvalue. For acoustics, the impedance is the physical parameter that determines this relation. \hypertarget{a-simple-solution}{% \subsubsection{A simple solution}\label{a-simple-solution}} Here we provide some very simple initial data, and determine the Riemann solution, which consists of three states \(q_\ell\), \(q_m\) and \(q_r\), and the speeds of the two waves. 
\begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder] \prompt{In}{incolor}{8}{\boxspacing} \begin{Verbatim}[commandchars=\\\{\}] \PY{c+c1}{\PYZsh{} Initial data for Riemann problem} \PY{n}{rho} \PY{o}{=} \PY{l+m+mf}{0.5} \PY{c+c1}{\PYZsh{} density} \PY{n}{bulk} \PY{o}{=} \PY{l+m+mf}{2.} \PY{c+c1}{\PYZsh{} bulk modulus} \PY{n}{ql} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{array}\PY{p}{(}\PY{p}{[}\PY{l+m+mi}{3}\PY{p}{,}\PY{l+m+mi}{2}\PY{p}{]}\PY{p}{)} \PY{c+c1}{\PYZsh{} Left state} \PY{n}{qr} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{array}\PY{p}{(}\PY{p}{[}\PY{l+m+mi}{3}\PY{p}{,}\PY{o}{\PYZhy{}}\PY{l+m+mi}{2}\PY{p}{]}\PY{p}{)} \PY{c+c1}{\PYZsh{} Right state} \PY{c+c1}{\PYZsh{} Calculated parameters} \PY{n}{c} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{sqrt}\PY{p}{(}\PY{n}{bulk}\PY{o}{/}\PY{n}{rho}\PY{p}{)} \PY{c+c1}{\PYZsh{} calculate sound speed} \PY{n}{Z} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{sqrt}\PY{p}{(}\PY{n}{bulk}\PY{o}{*}\PY{n}{rho}\PY{p}{)} \PY{c+c1}{\PYZsh{} calculate impedance} \PY{n+nb}{print}\PY{p}{(}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{With density rho = }\PY{l+s+si}{\PYZpc{}g}\PY{l+s+s2}{, bulk modulus K = }\PY{l+s+si}{\PYZpc{}g}\PY{l+s+s2}{\PYZdq{}} \PYZbs{} \PY{o}{\PYZpc{}} \PY{p}{(}\PY{n}{rho}\PY{p}{,}\PY{n}{bulk}\PY{p}{)}\PY{p}{)} \PY{n+nb}{print}\PY{p}{(}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{We compute: sound speed c = }\PY{l+s+si}{\PYZpc{}g}\PY{l+s+s2}{, impedance Z = }\PY{l+s+si}{\PYZpc{}g}\PY{l+s+s2}{ }\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{\PYZdq{}} \PYZbs{} \PY{o}{\PYZpc{}} \PY{p}{(}\PY{n}{c}\PY{p}{,}\PY{n}{Z}\PY{p}{)}\PY{p}{)} \end{Verbatim} \end{tcolorbox} \begin{Verbatim}[commandchars=\\\{\}] With density rho = 0.5, bulk modulus K = 2 We compute: sound speed c = 2, impedance Z = 1 \end{Verbatim} \begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder] \prompt{In}{incolor}{9}{\boxspacing} \begin{Verbatim}[commandchars=\\\{\}] \PY{c+c1}{\PYZsh{} Call and print Riemann solution} \PY{n}{states}\PY{p}{,} \PY{n}{speeds}\PY{p}{,} \PY{n}{reval} \PY{o}{=} \PYZbs{} \PY{n}{acoustics}\PY{o}{.}\PY{n}{exact\PYZus{}riemann\PYZus{}solution}\PY{p}{(}\PY{n}{ql} \PY{p}{,}\PY{n}{qr}\PY{p}{,} \PY{p}{[}\PY{n}{rho}\PY{p}{,} \PY{n}{bulk}\PY{p}{]}\PY{p}{)} \PY{n+nb}{print}\PY{p}{(}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{The states ql, qm and qr are: }\PY{l+s+s2}{\PYZdq{}}\PY{p}{)} \PY{n+nb}{print}\PY{p}{(}\PY{n}{states}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+se}{\PYZbs{}n}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)} \PY{n+nb}{print}\PY{p}{(}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{The left and right wave speeds are:}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)} \PY{n+nb}{print}\PY{p}{(}\PY{n}{speeds}\PY{p}{)} \end{Verbatim} \end{tcolorbox} \begin{Verbatim}[commandchars=\\\{\}] The states ql, qm and qr are: [[ 3. 5. 3.] [ 2. 0. -2.]] The left and right wave speeds are: [-2. 2.] \end{Verbatim} One way to visualize the Riemann solution for a system of two equations is by looking at the \(p-u\) phase plane. In the figure below, we show the two initial conditions of the Riemann problem \(q_\ell\) and \(q_r\) as points in the phase space; the lines passing through these points correspond to the eigenvectors, \(r_1\) and \(r_2\). The middle state \(q_m\) is simply the intersection of the line in the direction \(r_1\) passing through \(q_\ell\) and the line in the direction \(r_2\) passing through \(q_r\). The structure of this solution becomes evident from equation (\ref{eq:acussol}). 
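As a quick hand check of the printed output above: with the values chosen here the impedance is \(Z=1\), and the jumps are \(\Delta p = 0\) and \(\Delta u = -4\), so
\begin{align*}
\Delta w_1 &= \frac{-\Delta p + Z\Delta u}{2Z} = -2, &
\Delta w_2 &= \frac{\Delta p + Z\Delta u}{2Z} = -2, \\
q_m &= q_\ell + \Delta w_1 r_1 = \begin{bmatrix} 3 \\ 2 \end{bmatrix} - 2\begin{bmatrix} -1 \\ 1 \end{bmatrix} = \begin{bmatrix} 5 \\ 0 \end{bmatrix},
\end{align*}
in agreement with the middle column of the printed states, while the two waves propagate with speeds \(\pm c = \pm 2\), matching the printed wave speeds. In the phase plane, \(q_m=(5,0)\) is exactly the intersection point described in the previous paragraph.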
The dashed lines correspond to a line in the direction \(r_2\) passing through \(q_\ell\) and a line in the direction \(r_1\) passing through \(q_r\); these also intersect, but cannot represent a Riemann solution since they would involve a wave going to the right but connected to \(q_\ell\) and a wave going to the left but connected to \(q_r\). In the live notebook, the cell below allows you to interactively adjust the initial conditions and the material parameters, as well as the plot range, so that you can explore how the structure of the solution in the phase plane is affected by these quantities.
\begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder]
\prompt{In}{incolor}{10}{\boxspacing}
\begin{Verbatim}[commandchars=\\\{\}]
\PY{n}{acoustics\PYZus{}demos}\PY{o}{.}\PY{n}{interactive\PYZus{}phase\PYZus{}plane}\PY{p}{(}\PY{n}{ql}\PY{p}{,}\PY{n}{qr}\PY{p}{,}\PY{n}{rho}\PY{p}{,}\PY{n}{bulk}\PY{p}{)}
\end{Verbatim}
\end{tcolorbox}
\begin{verbatim}
interactive(children=(FloatSlider(value=3.0, description='$p_l$', max=10.0, min=0.01), FloatSlider(value=2.0, …
\end{verbatim}
\begin{verbatim}
Tab(children=(VBox(children=(HBox(children=(FloatSlider(value=3.0, description='$p_l$', max=10.0, min=0.01), F…
\end{verbatim}
\begin{verbatim}
Output()
\end{verbatim}
Note that the eigenvectors are given in terms of the impedance \(Z\), which depends on the density \(\rho\) and the bulk modulus \(K\). Therefore, when \(\rho\) and \(K\) are modified the eigenvectors change and consequently the slope of the lines changes as well.
\hypertarget{examples}{%
\subsection{Examples}\label{examples}}
We will use the exact solver in \url{exact_solvers/acoustics.py} and the functions in \url{exact_solvers/acoustics_demos.py} to plot interactive solutions for a few examples.
\hypertarget{shock-tube}{%
\subsubsection{Shock tube}\label{shock-tube}}
If there is a jump in pressure and the velocity is zero in both initial states (the shock tube problem), then the resulting Riemann solution consists of pressure jumps of equal magnitude propagating in both directions, with equal and opposite jumps in velocity. This is the linearized version of what is known in fluid dynamics as a shock tube problem, since it emulates what would happen inside a shock tube, where the air is initially stationary and a separate chamber at the end of the tube is pressurized and then released.
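For this shock tube case the middle state follows directly from the jump decomposition given earlier: since \(\Delta u = 0\),
\begin{align*}
\Delta w_1 = -\frac{\Delta p}{2Z}, \qquad \Delta w_2 = \frac{\Delta p}{2Z}, \qquad
q_m = q_\ell + \Delta w_1 r_1 = \begin{bmatrix} (p_\ell + p_r)/2 \\ (p_\ell - p_r)/(2Z) \end{bmatrix},
\end{align*}
so the middle pressure is the average of the two initial pressures, and the middle velocity is positive when \(p_\ell > p_r\) (fluid is pushed from the high-pressure side toward the low-pressure side). The cell below confirms this for a particular choice of states.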
\begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder]
\prompt{In}{incolor}{11}{\boxspacing}
\begin{Verbatim}[commandchars=\\\{\}]
\PY{n}{ql} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{array}\PY{p}{(}\PY{p}{[}\PY{l+m+mi}{5}\PY{p}{,}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{)}
\PY{n}{qr} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{array}\PY{p}{(}\PY{p}{[}\PY{l+m+mi}{1}\PY{p}{,}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{)}
\PY{n}{rho} \PY{o}{=} \PY{l+m+mf}{1.0}
\PY{n}{bulk} \PY{o}{=} \PY{l+m+mf}{4.0}
\PY{n}{acoustics\PYZus{}demos}\PY{o}{.}\PY{n}{riemann\PYZus{}plot\PYZus{}pplane}\PY{p}{(}\PY{n}{ql}\PY{p}{,}\PY{n}{qr}\PY{p}{,}\PY{n}{rho}\PY{p}{,}\PY{n}{bulk}\PY{p}{)}
\end{Verbatim}
\end{tcolorbox}
\begin{verbatim}
interactive(children=(FloatSlider(value=0.0, description='$t$', max=1.0), Dropdown(description='Characs.', opt…
\end{verbatim}
\begin{verbatim}
HBox(children=(FloatSlider(value=0.0, description='$t$', max=1.0), Dropdown(description='Characs.', options=(N…
\end{verbatim}
\begin{verbatim}
Output()
\end{verbatim}
We can also observe the structure of the solution in the phase plane, which is shown in the second plot.
\hypertarget{reflection-from-a-wall}{%
\subsubsection{Reflection from a wall}\label{reflection-from-a-wall}}
As another example, suppose the pressure is initially the same in the left and right states, while the velocities are non-zero with \(u_\ell = -u_r > 0\). The flow is converging from both sides and because of the symmetry of the initial states, the result is a middle state \(q_m\) in which the velocity is 0 (and the pressure is higher than on either side).
\begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder]
\prompt{In}{incolor}{12}{\boxspacing}
\begin{Verbatim}[commandchars=\\\{\}]
\PY{n}{ql} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{array}\PY{p}{(}\PY{p}{[}\PY{l+m+mi}{2}\PY{p}{,}\PY{l+m+mi}{1}\PY{p}{]}\PY{p}{)}
\PY{n}{qr} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{array}\PY{p}{(}\PY{p}{[}\PY{l+m+mi}{2}\PY{p}{,}\PY{o}{\PYZhy{}}\PY{l+m+mi}{1}\PY{p}{]}\PY{p}{)}
\PY{n}{rho} \PY{o}{=} \PY{l+m+mf}{1.0}
\PY{n}{bulk} \PY{o}{=} \PY{l+m+mf}{1.5}
\PY{n}{acoustics\PYZus{}demos}\PY{o}{.}\PY{n}{riemann\PYZus{}plot\PYZus{}pplane}\PY{p}{(}\PY{n}{ql}\PY{p}{,}\PY{n}{qr}\PY{p}{,}\PY{n}{rho}\PY{p}{,}\PY{n}{bulk}\PY{p}{)}
\end{Verbatim}
\end{tcolorbox}
\begin{verbatim}
interactive(children=(FloatSlider(value=0.0, description='$t$', max=1.0), Dropdown(description='Characs.', opt…
\end{verbatim}
\begin{verbatim}
HBox(children=(FloatSlider(value=0.0, description='$t$', max=1.0), Dropdown(description='Characs.', options=(N…
\end{verbatim}
\begin{verbatim}
Output()
\end{verbatim}
We again show the Riemann solution in space and in the phase plane, where the symmetry is also evident. Disregarding the left half of the domain (\(x<0\)), one can view this as a solution to the problem of an acoustic wave impacting a solid wall. The result is a reflected wave that moves away from the wall; notice that the velocity vanishes at the wall, as it must. This type of Riemann solution is important when simulating waves in a domain with reflecting boundaries. The reflecting condition can be imposed by the use of fictitious \emph{ghost cells} that lie just outside the domain and whose state is set by reflecting the interior solution with the symmetry just described (equal pressure, negated velocity).
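For concreteness, such a reflecting wall boundary can be sketched in a few lines. The function below is only an illustration of the idea (it is not the implementation used by PyClaw or Clawpack); it assumes a 1D grid whose first \texttt{num\_ghost} entries are ghost cells.
\begin{verbatim}
import numpy as np

def fill_left_wall_ghosts(p, u, num_ghost=2):
    """Fill ghost cells at a solid wall on the left edge of a 1D grid.

    p and u are arrays of cell averages; pressure is mirrored and
    velocity is mirrored with its sign flipped.
    """
    for g in range(num_ghost):
        mirror = 2 * num_ghost - 1 - g    # matching interior cell
        p[g] = p[mirror]
        u[g] = -u[mirror]
    return p, u
\end{verbatim}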
In reality, at a material boundary only part of a wave is reflected while the rest is transmitted. This can be accounted for by including the spatial variation in \(\rho, K\) and solving a variable-coefficient Riemann problem. \hypertarget{interactive-phase-plane-with-solution-at-fixed-time}{% \subsubsection{Interactive phase plane with solution at fixed time}\label{interactive-phase-plane-with-solution-at-fixed-time}} For a more general exploration of the solution to the acoustics equation, we now show an interactive solution of the acoustics equations. The initial states \(q_\ell\) and \(q_r\) can be modified by dragging and dropping the points in the phase plane plot (in the notebook version, or on \href{http://www.clawpack.org/riemann_book/phase_plane/acoustics_small.html}{this webpage}). \begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder] \prompt{In}{incolor}{13}{\boxspacing} \begin{Verbatim}[commandchars=\\\{\}] \PY{n}{IFrame}\PY{p}{(}\PY{n}{src}\PY{o}{=}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{phase\PYZus{}plane/acoustics\PYZus{}small\PYZus{}notitle.html}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{n}{width}\PY{o}{=}\PY{l+m+mi}{980}\PY{p}{,} \PY{n}{height}\PY{o}{=}\PY{l+m+mi}{340}\PY{p}{)} \end{Verbatim} \end{tcolorbox} \begin{tcolorbox}[breakable, size=fbox, boxrule=.5pt, pad at break*=1mm, opacityfill=0] \prompt{Out}{outcolor}{13}{\boxspacing} \begin{Verbatim}[commandchars=\\\{\}] <IPython.lib.display.IFrame at 0x7fa3748f3780> \end{Verbatim} \end{tcolorbox} \hypertarget{gaussian-initial-condition}{% \subsubsection{Gaussian initial condition}\label{gaussian-initial-condition}} In this example, we use the first example described near the beginning of this chapter. The initial condition is a Gaussian pressure perturbation, while the initial velocity is zero. Reflecting boundary conditions are imposed at \(x=-2\) and \(x=2\), so the wave is fully reflected back, and we can see how it interacts with itself. This animation is produced using a numerical method from \href{http://www.clawpack.org/pyclaw/}{PyClaw}, and can be viewed in the interactive notebook or on \href{http://www.clawpack.org/riemann_book/html/acoustics_bump_animation.html}{this webpage}. \begin{tcolorbox}[breakable, size=fbox, boxrule=1pt, pad at break*=1mm,colback=cellbackground, colframe=cellborder] \prompt{In}{incolor}{14}{\boxspacing} \begin{Verbatim}[commandchars=\\\{\}] \PY{n}{anim} \PY{o}{=} \PY{n}{acoustics\PYZus{}demos}\PY{o}{.}\PY{n}{bump\PYZus{}animation}\PY{p}{(}\PY{n}{numframes} \PY{o}{=} \PY{l+m+mi}{50}\PY{p}{)} \PY{n}{HTML}\PY{p}{(}\PY{n}{anim}\PY{p}{)} \end{Verbatim} \end{tcolorbox} \begin{tcolorbox}[breakable, size=fbox, boxrule=.5pt, pad at break*=1mm, opacityfill=0] \prompt{Out}{outcolor}{14}{\boxspacing} \begin{Verbatim}[commandchars=\\\{\}] <IPython.core.display.HTML object> \end{Verbatim} \end{tcolorbox} % Add a bibliography block to the postdoc \end{document}
{ "alphanum_fraction": 0.6700193095, "avg_line_length": 48.3353846154, "ext": "tex", "hexsha": "f4fbfc62012808071af806eac33ca7daf6340411", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "81479366c2be728101d6499c27b95ebf5fcdf7a3", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "alsam/Claw.jl", "max_forks_repo_path": "src/acoustics_1d_example1/jupyter/Acoustics.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "81479366c2be728101d6499c27b95ebf5fcdf7a3", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "alsam/Claw.jl", "max_issues_repo_path": "src/acoustics_1d_example1/jupyter/Acoustics.tex", "max_line_length": 226, "max_stars_count": 2, "max_stars_repo_head_hexsha": "81479366c2be728101d6499c27b95ebf5fcdf7a3", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "alsam/Claw.jl", "max_stars_repo_path": "src/acoustics_1d_example1/jupyter/Acoustics.tex", "max_stars_repo_stars_event_max_datetime": "2020-01-14T06:22:20.000Z", "max_stars_repo_stars_event_min_datetime": "2018-12-24T01:58:45.000Z", "num_tokens": 16917, "size": 47127 }
\chapter{Installation and User guide}
\section{Downloading the program}
The program may be downloaded from the following URLs:
\begin{itemize}
\item \href{http://cns-web.bu.edu/~satra/pub/vtcalcs.tgz} {http://cns-web.bu.edu/~satra/pub/vtcalcs.tgz - for Unix}
\item \href{http://cns-web.bu.edu/~satra/pub/vtcalcs.zip} {http://cns-web.bu.edu/~satra/pub/vtcalcs.zip - for MS Windows}
\end{itemize}
\section{Installation}
The program can be installed in any directory. However, the directory structure has to be maintained when unpacking.
\begin{enumerate}
\item Choose a directory for installation.
\item Copy the archived file into the directory.
\item Unzip the file while \emph{maintaining the directory structure}. On UNIX systems one may use the command:
\begin{center} gunzip -c vtcalcs.tgz $|$ tar xf - \end{center}
On Microsoft Windows systems, either use an unzip program like Winzip or use pkunzip with the command:
\begin{center} pkunzip -d vtcalcs.zip \end{center}
Most Linux systems support the following command directly:
\begin{center} tar zxf vtcalcs.tgz \end{center}
\item A directory called vtcalcs will be created with three subdirectories: data, doc, and src. `data' contains some necessary data files for running the program. `doc' contains this document. `src' contains the source code for compiling the mex functions.
\item If you are installing it on a system for which mex files have not been provided, you need to create the mex files. This is described in the next section.
\end{enumerate}
\section{Creating the mex files}
Creating the mex files requires that you have the Matlab compiler installed properly. For installing the Matlab compiler, please refer to the documentation provided by Mathworks. On an MS Windows system this typically requires both the Matlab compiler and an additional compiler such as Microsoft Visual C++. Matlab v5.3 works fine; however, v5.2 and below are known to have problems on newer Linux systems and can result in segmentation violations. The following URL should help solve the problem:
\begin{center}
\href{http://www.mathworks.com/support/solutions/v5/11129.shtml} {http://www.mathworks.com/support/solutions/v5/11129.shtml}
\end{center}
\begin{enumerate}
\item Start Matlab.
\item Change to the source directory.
\item At the Matlab prompt, type Makefile('unix') or Makefile('windows'). The function is case sensitive. If the compiler has been set up properly, the call should have created mex files for your specific platform.
\item Copy or move the mex files from the `src' directory to the vtcalcs directory. The files will have different extensions on different platforms. Some common platforms and extensions are listed below. Please refer to the compiler documentation for any other platforms.\\
\begin{center}
\begin{tabular}{|c|c|}
\hline Platform & Extension \\
\hline Windows 9x/NT & .dll \\
\hline Linux & .mexlx \\
\hline Solaris & .mex4 \\
\hline
\end{tabular}
\end{center}
\end{enumerate}
\section{Using the program}
To start the program, open Matlab in the directory that contains the m-file vtcalcs.m. On Unix, this can be accomplished by starting Matlab in the directory containing the file. On Windows, one can change the directory from within Matlab by using the `cd' command. Once you are in the directory, start the program by typing vtcalcs at the Matlab prompt. This launches the user interface (UI) for the program. The UI has three menu options relevant to running the program.
These are:
\begin{itemize}
\item VT Calculation
\item Tract configuration
\item Physical constants
\end{itemize}
\subsection{VT Calculation from Models}
The current version provides 5 different methods of calculating the transfer function. When selected, it presents the user with these 5 options. The sixth option is currently not available and has been disabled. Choosing any of the other options opens a dialog box with rather intuitive controls. Most of the pushbuttons with numeric values on them pop up dialogs that allow those values to be changed. Some push buttons toggle states and some perform a particular action (e.g.\ synthesize). The sliders change values continuously. The allowable ranges of the values are provided in Appendix A.
\subsection{Changing Tract Configuration}
When this menu option is selected, a dialog box with four pushbuttons pops up. Clicking on any of the buttons toggles its state, and the current state is displayed on the button.
\subsection{Changing Physical Constants}
When this menu option is selected, a dialog box with five pushbuttons pops up. Clicking on any of the buttons opens a dialog box where one can enter a new value. If no value is entered, the current value is retained. This dialog also keeps the entered value within the range of allowed values.
\section{Registered Trademarks}
\begin{itemize}
\item Matlab is a registered trademark of Mathworks Inc.
\item Microsoft Visual C++, Microsoft Windows, MS Windows, and Windows 9x/NT are registered trademarks of Microsoft Corporation.
\item Solaris is a registered trademark of Sun Microsystems.
\end{itemize}
{ "alphanum_fraction": 0.7682296651, "avg_line_length": 41.4682539683, "ext": "tex", "hexsha": "401a57a66c72c454fd02d35b6717906e5e33c602", "lang": "TeX", "max_forks_count": 6, "max_forks_repo_forks_event_max_datetime": "2022-01-03T10:25:06.000Z", "max_forks_repo_forks_event_min_datetime": "2015-07-25T12:40:34.000Z", "max_forks_repo_head_hexsha": "671ace2557a0923926948c644b70d66131980c6c", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "bambooforest/VocalTractModels", "max_forks_repo_path": "vtcalcs/doc/install.tex", "max_issues_count": 2, "max_issues_repo_head_hexsha": "671ace2557a0923926948c644b70d66131980c6c", "max_issues_repo_issues_event_max_datetime": "2022-01-03T17:37:32.000Z", "max_issues_repo_issues_event_min_datetime": "2017-12-06T02:04:52.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "bambooforest/VocalTractModels", "max_issues_repo_path": "vtcalcs/doc/install.tex", "max_line_length": 99, "max_stars_count": 4, "max_stars_repo_head_hexsha": "671ace2557a0923926948c644b70d66131980c6c", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "bambooforest/VocalTractModels", "max_stars_repo_path": "vtcalcs/doc/install.tex", "max_stars_repo_stars_event_max_datetime": "2021-02-04T13:52:54.000Z", "max_stars_repo_stars_event_min_datetime": "2015-01-20T13:34:36.000Z", "num_tokens": 1239, "size": 5225 }
\subsection{Experiment Setup} \label{Experiment_Setup}
The experiment consisted of the AAC subsystem, with six sampling bags, and the CAC coiled tube subsystem. As shown in Figure \ref{fig:3D_tubular_render}, the AirCore was fitted into the CAC box and the alternative sampling system with its bags into the AAC box, while the pneumatic system and the electronics were placed inside the \emph{Brain}. The principal aim was to validate the AAC sampling method. To do so, it was necessary to sample during the Descent Phase in order to compare the results with the ones obtained from the CAC. This was because the CAC collected its air sample passively by pressure differentials in the descent. Flight speeds mentioned in this section were obtained from the BEXUS manual as well as through analysis of past flights. Figure \ref{fig:block-diagram} shows a generic block diagram of the main subsystems' interconnection.
\begin{figure}[H]
\begin{align*}
\includegraphics[width=1\linewidth]{4-experiment-design/img/Mechanical/tubular_render_labels.jpg}
\end{align*}
\caption{Physical Setup of the Experiment.}
\label{fig:3D_tubular_render}
\end{figure}
\begin{figure}[H]
\begin{align*}
\includegraphics[width=1\linewidth]{4-experiment-design/img/Mechanical/Block-Diagram.png}
\end{align*}
\caption{Block Diagram of the Experiment.}
\label{fig:block-diagram}
\end{figure}
The primary concern regarding the AAC air sampling subsystem was the period after the cut-off, while the gondola was tumbling and falling at an average speed of 50 m/s for approximately two minutes \cite{BexusManual}. This descent speed was too high to sample air at the desired vertical resolution, capped at 500 m. As such, sampling could only be done after the gondola had stabilized at a descent speed of 8 m/s \cite{BexusManual}. The tumbling phase spanned approximately 8 km vertically. With a Float Phase altitude of approximately 27.3 km, sampling during the Descent Phase would have commenced at approximately 19 km in altitude. However, the primary region of interest in terms of sampling was in the stratosphere, particularly between 19 km and 27.3 km in altitude. This was why sampling was planned to also occur during the Ascent Phase. Out of the six sampling bags present in the payload, two were planned to be used during the Ascent Phase, at 18 km and 21 km, and four during the Descent Phase, at 17.5 km, 16 km, 14 km and 12 km, as seen in Table \ref{tab:minimum-volume}. Details regarding the sampling strategy can be found in Appendix \ref{sec:appH}.
%\input{4-experiment-design/tables/samplingaltitudes.tex}
The maximum pressure that the sampling bags could withstand had to be taken into account in order to avoid bursting. Decreasing pressure during the Ascent Phase would have posed a risk to sampling bags which already contained samples, as the gas inside would expand and might cause the bag to burst. In order to avoid this, the sampling bags were not planned to be completely filled. Filling the sampling bags up to a maximum pressure of 2 psi/0.14 bar/140 hPa, or alternatively up to 80\% of their capacity, was recommended by the manufacturers of the Multi-Layer Foil sampling bags that were used. Therefore, the expected maximum pressure inside the bags that were filled during the Ascent Phase was 1.6 psi/0.11 bar/110 hPa. The inverse was also true for the Descent Phase, where compression would occur.
As such, the sampling bags had to be fully filled during the Descent Phase in order to ensure that enough samples were collected for analysis. During the Descent Phase, the maximum pressure inside the bags was expected to be 1.98 psi/0.13 bar/130 hPa. Past research had revealed that the selected sampling bags were able to withstand a pressure difference of 310 hPa at 30 km of altitude, which was equivalent to 0.31 bar \cite{LISA}. Tests 16 and 18, shown in Table \ref{tab:sampling-system-test} and Table \ref{tab:pump-low-pressure-test} respectively, were conducted in order to confirm the maximum allowable pressure for the bags. The maximum operating pressure for the tubes, according to the manufacturers, was 2.2 psi/0.15 bar/150 hPa. The valve's leakage rate, given by the manufacturers, was 0.001 l/min.
Due to the difference in pressure between sea level and sampling altitudes, the volume of the sample taken would have been considerably reduced by the time it reached sea level. This shrinking had to be taken into account when determining the minimum volume that had to be present in the sampling bag at sea level in order to obtain results with the Picarro analyzer. A minimum amount was required for the analyzer to detect concentrations of the targeted trace gases. This minimum amount was 0.18 L at sea level, and it had to be specially considered for the samples taken at higher altitudes. The samples taken at lower altitudes were exposed to smaller changes in pressure; therefore, their volume was not critically reduced. Table \ref{tab:minimum-volume} shows the minimum volume of air that needed to be sampled at different altitudes in order to ensure a minimum air sample of 0.18 L at sea level. \\
This was the worst case scenario, and testing had shown that the higher the volume of the air sample left at sea level, the better the results. This was why the aimed-for volume of the samples at sea level was at least 0.6 L.
%and the corresponding temperature and pressure conditions
%pressure and temperature (288 K)
% Depending on the sampling altitude,there is a minimum volume of air that needs to be sampled in order the sample volume left at sea level pressure is at least 0.18 L. A sample volume of 0.18 L corresponds to the minimum amount required for the Picarro analyzer to detect concentrations of the targeted trace gases.
\input{4-experiment-design/tables/minimumvolume.tex}
The AAC needed an air pump for sampling due to the low ambient pressure at stratospheric altitudes. The air pump was also needed in order to assure the intake flow rate and obtain a good resolution. An air pump with an intake rate of at least 3 L/min was used to ensure that the vertical resolution of the sampled air remained under 500 m at the Ascent Phase's ascent speed of 5 m/s and the Descent Phase's descent speed of 8 m/s. A flushing valve (see Figure \ref{pneumatic_system}, No.23) was used to flush the AAC system before each bag was filled, to make sure that each bag would be filled with fresh air from the corresponding altitude. This filling/flushing procedure was planned to occur twice, the first time during the Ascent Phase for the first two sampling bags and the second time during the Descent Phase for the remaining four sampling bags. Shortly after the launch, the CAC valve was opened in order to allow the fill gas that was inside the tube to be flushed out, while the AAC valves remained closed until reaching the sampling altitude.
Flushing of the CAC tube happened passively through the progressive decrease in air pressure during the balloon's Ascent Phase, and the tube was emptied by the time it reached the Float Phase. Filling of the CAC tube also happened passively, through the progressive increase in air pressure during the balloon's Descent Phase. The CAC valve was planned to remain open at all times during the Ascent, Float, and Descent phases. Due to some problems, it was briefly closed and opened again a few times, without significantly compromising the results. The valve should have been closed just before hitting the ground in order to preserve the sample.
The ambient pressure was measured by three pressure sensors located outside the experiment box. Only one of them was necessary for the AAC and CAC, but using three provided redundancy. To measure the pressure inside the bag that was currently being filled, one analogue static pressure sensor was connected to the pneumatic system. To measure the ambient temperature in the CAC, three sensors were allocated in the CAC box (in the Styrofoam). The temperature inside the coil was assumed to adjust quickly to the ambient temperature inside the CAC box; therefore, there would be no difference in temperature between the air inside the tube and the air surrounding the tube. For the bags, three more temperature sensors were placed in the bags' box (in the Styrofoam). To control the temperature of the pump and the valves in the pneumatic subsystem, one temperature sensor was used for each of them. In total, there were three pressure sensors and eight temperature sensors.
The sampling of the AAC was triggered by the pressure reading from the sensors outside the experiment box. When the required pressure was reached, as seen in Table \ref{tab:minimum-volume}, the valve inside the manifold corresponding to the bag that was to be sampled should have opened and the sampling should have started. The closing of the valve depended on two conditions, and it was triggered when either one of the conditions was true. These conditions were a maximum sampling time and a maximum pressure difference between the inside and the outside of the bags. They were determined from past research \cite{LISA}. A first estimate of the maximum sampling time had already been made from Test 18, shown in Table \ref{tab:pump-low-pressure-test}. From completed tests, such as Test 14 and Test 18, shown in Table \ref{tab:vacuum-test} and Table \ref{tab:pump-low-pressure-test} respectively, the maximum pressure condition had been determined and the maximum sampling times had been confirmed. The CAC emptying, as well as the AAC and CAC sampling sequence, is represented in Figures \ref{fig:ascent} and \ref{fig:descent}. It should be kept in mind that the different pressures were what should have triggered the opening of the valves.
\begin{figure}[H]
\begin{align*}
\includegraphics[width=1\linewidth]{4-experiment-design/img/ascent-phase.jpeg}
\end{align*}
\caption{The Emptying and Sampling Sequence-Ascent Phase.}
\label{fig:ascent}
\end{figure}
\begin{figure}[H]
\begin{align*}
\includegraphics[width=1\linewidth]{4-experiment-design/img/descent-phase.jpeg}
\end{align*}
\caption{The Emptying and Sampling Sequence-Descent Phase.\label{fig:descent}}
\end{figure}
In the diagrams, 0 denotes closed/off and 1 denotes opened/on. The horizontal axis denotes the different pressure levels throughout the flight, with p$_0$ being the sea level pressure and p$_8$ being the pressure during the Float Phase.
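To make the open/close logic concrete, the sketch below illustrates the intended behaviour for a single bag. It is pseudocode-style Python written for this document only; the names, the helper callables, and the thresholds are illustrative assumptions and are not taken from the flight software.
\begin{verbatim}
# Illustrative sketch of the per-bag sampling logic (not flight code).
def sample_bag(valve, pump, ambient_pressure, bag_pressure, clock,
               target_pressure, tolerance, max_time, max_dp):
    # Wait until the ambient pressure is within a tolerance band of the
    # planned trigger pressure for this bag.
    while abs(ambient_pressure() - target_pressure) > tolerance:
        pass
    valve.open()
    pump.on()
    start = clock()
    # Close as soon as either condition is met: the maximum sampling time
    # is reached, or the maximum bag/ambient pressure difference is reached.
    while (clock() - start < max_time and
           bag_pressure() - ambient_pressure() < max_dp):
        pass
    pump.off()
    valve.close()
\end{verbatim}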
The ambient-pressure-dependent timeline of the experiment was planned to be as follows:
\textbf{Ascent Phase:}\\
$p_0$ – $p_1$
\begin{itemize}
\item CAC valve shall be closed.
\item AAC valves shall be closed.
\end{itemize}
$p_1$ – $p_2$
\begin{itemize}
\item CAC valve shall be opened.
\item CAC tube shall start flushing.
\end{itemize}
$p_2$ – $p_3$
\begin{itemize}
\item AAC flushing valve shall be opened, allowing for the system to flush.
\item CAC valve should remain open.
\end{itemize}
$p_3$ – $p_4$
\begin{itemize}
\item AAC flushing valve shall be closed.
\item Valve 1 shall be opened, allowing for air to enter the first bag.
\item CAC valve should remain open.
\end{itemize}
$p_4$ – $p_5$
\begin{itemize}
\item Valve 1 shall be closed.
\item AAC flushing valve shall be closed.
\item CAC valve should remain open.
\end{itemize}
$p_5$ – $p_6$
\begin{itemize}
\item AAC flushing valve shall be opened, allowing the system to flush.
\item CAC valve should remain open.
\end{itemize}
$p_6$ – $p_7$
\begin{itemize}
\item AAC flushing valve shall be closed.
\item Valve 2 shall be opened, allowing for air to enter the second bag.
\item CAC valve should remain open.
\end{itemize}
$p_7$ – $p_8$
\begin{itemize}
\item Valve 2 shall be closed.
\item AAC flushing valve shall be closed.
\item CAC shall finish flushing.
\end{itemize}
\textbf{Float Phase:}\\
No action was taken other than continued telemetry.
\textbf{Descent Phase:}\\
$p_9$ – $p_{10}$
\begin{itemize}
\item CAC shall start sampling.
\item AAC valves shall be closed.
\end{itemize}
$p_{10}$ – $p_{11}$
\begin{itemize}
\item AAC flushing valve shall be opened, allowing the system to flush.
\item CAC valve should remain open.
\end{itemize}
$p_{11}$ – $p_{12}$
\begin{itemize}
\item AAC flushing valve shall be closed.
\item Valve 3 shall be opened, allowing for air to enter the third bag.
\item CAC valve should remain open.
\end{itemize}
$p_{12}$ – $p_{13}$
\begin{itemize}
\item Valve 3 shall be closed.
\item AAC flushing valve shall be closed.
\item CAC valve should remain open.
\end{itemize}
$p_{13}$ – $p_{14}$
\begin{itemize}
\item AAC flushing valve shall be opened, allowing the system to flush.
\item CAC valve should remain open.
\end{itemize}
$p_{14}$ – $p_{15}$
\begin{itemize}
\item AAC flushing valve shall be closed.
\item Valve 4 shall be opened, allowing for air to enter the fourth bag.
\item CAC valve should remain open.
\end{itemize}
$p_{15}$ – $p_{16}$
\begin{itemize}
\item Valve 4 shall be closed.
\item AAC flushing valve shall be closed.
\item CAC valve should remain open.
\end{itemize}
$p_{16}$ – $p_{17}$
\begin{itemize}
\item AAC flushing valve shall be opened, allowing the system to flush.
\item CAC valve should remain open.
\end{itemize}
$p_{17}$ – $p_{18}$
\begin{itemize}
\item AAC flushing valve shall be closed.
\item Valve 5 shall be opened, allowing for air to enter the fifth bag.
\item CAC valve should remain open.
\end{itemize}
$p_{18}$ – $p_{19}$
\begin{itemize}
\item Valve 5 shall be closed.
\item AAC flushing valve shall be closed.
\item CAC valve should remain open.
\end{itemize}
$p_{19}$ – $p_{20}$
\begin{itemize}
\item AAC flushing valve shall be opened, allowing the system to flush.
\item CAC valve should remain open.
\end{itemize}
$p_{20}$ – $p_{21}$
\begin{itemize}
\item AAC flushing valve shall be closed.
\item Valve 6 shall be opened, allowing for air to enter the sixth bag.
\item CAC valve should remain open.
\end{itemize}
$p_{pre-landing}$
\begin{itemize}
\item Valve 6 shall be closed.
\item AAC flushing valve shall be closed. \item CAC valve shall be opened. \end{itemize} $p_{0-landing}$ \begin{itemize} \item CAC valve shall be closed. \end{itemize} Note: The AAC system's air pump is only on during sampling into the air sampling bags and flushing of the system. \raggedbottom
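
Purely as an illustration of the pressure-driven sequencing above, the sketch below treats the timeline as a table of threshold crossings and abbreviated valve actions that is scanned on every new ambient pressure reading. It is not the flight software, and the threshold values are invented placeholders.

\begin{verbatim}
# Toy sketch of the pressure-driven sequencing (placeholder thresholds, not the
# flight values). On ascent the ambient pressure falls through p1..p8; each
# threshold crossing triggers the corresponding valve commands exactly once.
ASCENT_STEPS = [
    # (threshold [hPa], abbreviated action)
    (900.0, "open CAC valve; CAC tube starts flushing"),
    (700.0, "open AAC flushing valve"),
    (500.0, "close AAC flushing valve; open bag valve 1"),
    (400.0, "close bag valve 1"),
]

def on_pressure_reading(p_ambient, steps, executed):
    """Execute every not-yet-executed step whose threshold has been crossed."""
    for threshold, action in steps:
        if p_ambient <= threshold and action not in executed:
            print("trigger:", action)    # replace with real valve commands
            executed.add(action)

executed = set()
for p in (950.0, 880.0, 650.0, 480.0, 390.0):   # simulated ascent readings
    on_pressure_reading(p, ASCENT_STEPS, executed)
\end{verbatim}

The Descent Phase would be handled in the same way, with the thresholds crossed in the opposite direction as the ambient pressure increases.
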
{ "alphanum_fraction": 0.7615857826, "avg_line_length": 67.1100917431, "ext": "tex", "hexsha": "8331945055a35d7ebd6bea5ebaeb40040f5e40fd", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "c0db957167dfc90c25743af64c514fce837c1405", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "georgeslabreche/tubular-bexus-sed", "max_forks_repo_path": "4-experiment-design/4.1-experiment-setup.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "c0db957167dfc90c25743af64c514fce837c1405", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "georgeslabreche/tubular-bexus-sed", "max_issues_repo_path": "4-experiment-design/4.1-experiment-setup.tex", "max_line_length": 1471, "max_stars_count": 1, "max_stars_repo_head_hexsha": "c0db957167dfc90c25743af64c514fce837c1405", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "georgeslabreche/tubular-bexus-sed", "max_stars_repo_path": "4-experiment-design/4.1-experiment-setup.tex", "max_stars_repo_stars_event_max_datetime": "2018-01-17T10:38:07.000Z", "max_stars_repo_stars_event_min_datetime": "2018-01-17T10:38:07.000Z", "num_tokens": 3573, "size": 14630 }
\documentclass[DM,lsstdoc,toc]{lsstdoc}
\usepackage{graphicx}
\usepackage{url}
\usepackage{latexsym}
\usepackage{color} % black, blue, brown, cyan, darkgray, gray, green, lightgray, lime, magenta, blue, orange, pink, purple, red, teal, violet, white, yellow.
\usepackage{enumitem}
\title[LSST Special Programs]{Data Management \\ and LSST Special Programs}
\author{M.~L.~Graham, M.~Juri\'{c}, K.-T.~Lim, and E.~Bellm}
\setDocRef{DMTN-065}
\date{\today}
\setDocUpstreamLocation{\url{https://github.com/lsst-dm/dmtn-065}}
\setDocAbstract{This document provides an in-depth description of the role of the LSST Project in preparing software and providing computational resources to process the data from Special Programs (deep drilling fields and/or mini-surveys). The plans and description in this document flow down from the requirements in \citeds{LSE-61} regarding processing for Special Programs. The main target audience is the LSST Data Management (DM) team, but members of the community who are preparing white papers on science-driven observing strategies may also find this document useful. The potential diversity of data from Special Programs is summarized, including boundaries imposed by technical limitations of the LSST hardware. The capability of the planned Data Management system to process this diversity of Special Programs data is the main focus of this document. Case studies are provided as examples of how the LSST software and/or user-generated pipelines may be combined to process the data from Special Programs.}
\setDocChangeRecord{%
\addtohist{1}{2017-11-14}{Status: internal working document.}{Melissa Graham}
\addtohist{1}{2018-06-17}{Updated to finalize and issue.}{Melissa Graham}
%\addtohist{2}{yyyy-mm-dd}{Future changes}{Future person}
}
\begin{document}
\maketitle
% CITATION EXAMPLES
% \verb|\citellp|: \citellp{LPM-17, LSE-30} \\
% \verb|\citell|: (SRD; \citell{LPM-17,LSE-29}) \\
% \verb|\citep[][]|: \citep[e.g.,][are interesting]{LPM-17,LSE-29} \\
% \verb|\cite|: \cite{LPM-17,LSE-29}
% % % % % % % % % % % % % % % % % %
% % % % % % % % % % % % % % % % % %
\section{Introduction}
\label{sec:intro}
The main LSST science goals will be met by the Wide-Fast-Deep (WFD) Main Survey, but this is expected to be accomplished with $85$--$90$\% of the observing time available over the $10$ year survey. The remaining $10$--$15$\% of the time will be spent on Special Programs: alternative survey areas and/or observing strategies driven by a specific science goal. A call for white papers that provide scientific motivation and observing strategies for the WFD survey and Special Programs, \citeds{Document-28382}, released in June 2018, has a deadline of November 30, 2018. It is conceivable that Special Programs might obtain imaging data that are significantly different from the WFD main survey, and/or require special processing in order to achieve the program's science goals. This document provides an in-depth description of the role of the LSST Project in preparing to process the data from Special Programs (expanding on the general description in Section 6 of \citeds{LSE-163}). The main target audience is the LSST Data Management (DM) team, but members of the science community who are preparing white paper proposals may also find this document useful.

{\bf Special Programs Terminology -- } The WFD main survey will cover the sky with a series of {\bf visits}, and each field will be visited $>800$ times over $10$ years.
A {\bf standard visit} is composed of $2\times15$ second exposures (commonly referred to as ``snaps") and an {\bf alternative standard visit} is composed of a single $30$ second exposure. A {\bf non-standard visit} is any other exposure time(s) or number of snaps. Special Programs are typically divided into two types: {\bf Deep Drilling}, a single pointing for which many exposures are obtained in a relatively short amount of time (e.g., $>2\times$ as many visits in six months as the WFD will obtain in 10 years); and {\bf Mini-Surveys}, which refer to either new sky areas observed with a WFD-like survey, or sky areas within the WFD but observed with a specialized strategy. {\bf The LSST Project's Role in Processing Special Programs Data -- } The formal requirements regarding LSST's role in processing Special Programs data are in \citeds{LSE-61}, and the following statements have been derived from those requirements. The LSST Project will not take formal responsibility for specialized data reduction algorithms needed to process data, including images taken in non-standard modes. The term "specialized algorithms" refers to software that is not already within scope of the LSST Data Management (DM) science pipelines, and may include, for example: difference imaging for short exposures in which the PSF is not well-formed, shift-and-stack for faint moving objects, or any software with computational needs that significantly surpass the processing budget per image (compared to the processing of a WFD image). The Project will incorporate Special Programs data into the Prompt and/or Data Release processing pipelines and data products of the WFD Main Survey, such as {\tt Alerts}, {\tt CoAdds}, or {\tt Source} and {\tt Object} catalogs (with appropriate flags; LSST data products are described in \citeds{LSE-163}), whenever this (1) can be accomplished with existing software, and (2) is scientifically beneficial to that data product. The Project will also reconfigure its pipelines to generate separate imaging and catalog data products for Special Programs, whenever this can be accomplished with existing software. Finally, the Project will enable user-generated processing via the Science Platform (\citeds{LSE-319}), which will provide software tools and computational resources for (re)processing LSST data. {\bf Document Overview -- } The purpose of this document is to assess the potential diversity of imaging data that might be obtained by Special Programs (Section \ref{sec:data}), and to explore and clarify whether -- and to what extent -- DM's planned pipelines and user services will be able to handle this diversity of data (Section \ref{sec:dmplans}). As described in the call for white papers on cadence optimization (\citeds{Document-28382}), Special Programs that may require specialized or computationally-intense algorithms to meet their science goals are required to describe how these processing needs will be met. Sections \ref{sec:data} and \ref{sec:dmplans} have been designed to help white paper authors figure out whether their proposed Science Program(s) require processing that would be considered out of DM's scope, and in Section \ref{sec:SPCS} we have provided a series of science-driven case studies as examples. 
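
Purely for illustration, this visit terminology can be expressed as a simple classification based on exposure metadata; the field names below are hypothetical and do not correspond to the observatory's actual header keywords.

\begin{verbatim}
# Illustrative only: classify a visit from its exposure metadata, using the
# terminology defined above. The arguments (n_snaps, exptime_s) are hypothetical.
def classify_visit(n_snaps, exptime_s):
    if n_snaps == 2 and exptime_s == 15.0:
        return "standard visit (2 x 15 s)"
    if n_snaps == 1 and exptime_s == 30.0:
        return "alternative standard visit (1 x 30 s)"
    return "non-standard visit"

print(classify_visit(2, 15.0))   # standard visit (2 x 15 s)
print(classify_visit(1, 5.0))    # non-standard visit
\end{verbatim}
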
% In Appendix \ref{sec:docrev}, we endeavor to ensure that the DM's plans for Special Programs processing are appropriately expressed in all of the major Project documents and that the computational system and human work-hours needed to accomplish Special Programs processing have been accurately evaluated. This content will only be of interest to the DM team.
% % % % % % % % % % % % % % % % % %
% % % % % % % % % % % % % % % % % %
\clearpage
\section{The Potential Diversity of Special Programs Data}
\label{sec:data}
To define the extent to which Data Management will be able to process data from Special Programs, a comprehensive understanding of the potential diversity of data from Special Programs -- compared to the WFD's sky survey of standard visits -- is needed. To build up this comprehensive understanding, in Section \ref{ssec:data_bounds} we considered any technical limitations that the facility and instrumentation will or might place on the data. We find that the hardware imposes few boundaries on how data can be obtained, but that a high number of filter changes and/or long slews are inefficient due to their large overheads. The minimum exposure time is $1$ second (stretch goal: $0.1$ seconds), but there is currently a technical boundary that limits the readout rate to one image every $15$ seconds. Next, we considered the Special Programs that have been openly discussed so far in the Science Community (Appendix \ref{sec:data_prev}). Based on this consideration, most of the proposed Special Programs are likely to use visits that are similar to the WFD Main Survey, but some will require exposures that are significantly shorter, or that are obtained with a bright sky background during twilight. The cadence and patterns may also differ from the WFD main survey, such as long series of exposures obtained of the same field (i.e., deep drilling), or a strategy optimized to find very fast-moving objects. In addition, images of very crowded fields in the Galactic Plane may be included (fields that the WFD Main Survey either does not cover at all, or covers less often). It does not appear that any of the previously proposed Special Programs violate the hardware-imposed boundaries discussed in Section \ref{ssec:data_bounds}.
% % % % % % % % % % % % % % % % % %
% % % % % % % % % % % % % % % % % %
\subsection{Hardware Boundaries}\label{ssec:data_bounds}
Here we consider the technical boundaries on the diversity of data products that are expected to (or may) be imposed by limitations from the camera, telescope, and/or site (boundaries imposed by DM processing capabilities are considered in Section \ref{sec:dmplans}).

{\bf Filter Changes -- } The maximum time for a filter change is $120$ seconds: $30$ seconds for the telescope to reorient the camera to its nominal zero angle position on the rotator, and $90$ seconds for the camera subsystem to execute the change (OSS-REQ-0293; \citeds{LSE-30}). Assuming that most Special Programs would be designed to keep overheads $<100\%$ and would be using standard $30$ second visits, the filter change time indicates that it is likely that at least $4$ exposures in a given filter would be obtained between filter changes -- but this is not actually a technical boundary.

{\bf Filter Carousel Loads -- } As described in \citeds{Document-28382}, the filter carousel can hold five of the six LSST filters at a time, and filter loads are done during the day.
The system is designed to support $3000$ loads and $100000$ filter changes in $15$ years, which is an average of $17$ changes per night (after accounting for filter changes during calibrations). Individual filters will support $30000$ changes in $15$ years. Based on these technical boundaries, we know that there will never be data in more than five filters in a given night.

{\bf Exposure Times -- } The minimum exposure time is $1$ second, with a stretch goal of $0.1$ seconds (OSS-REQ-0291; \citeds{LSE-30}). The maximum exposure time is not restricted. The readout time is $2$ seconds, and would be a significant overhead on short exposures. Images with exposure times $<15$ seconds may still have to be separated by $15$ seconds for thermal tolerance; i.e., the minimum readout rate is one image every $15$ seconds, regardless of exposure time (OSS-REQ-0291; \citeds{LSE-30}). We therefore consider the $15$ second interval between images a technical boundary on the potential diversity of data products.
% $\bullet$ However, for exposure times there are other considerations. The minimum exposure time needed to create an image with a PSF that is well-formed enough for difference imaging is a separate question. Changing the exposure time also affects the photometric and astrometric calibrations. Assuming a 1 second exposure can be reduced and calibrated, its detected point sources will span $13 < r < 21$ magnitudes, whereas a 15 second exposure saturates at $r\sim15.8$ mag. A 150 second image would saturate at $r\sim18.3$, perhaps leaving too few stars overlapping with, e.g., templates or WFD images for astrometric and photometric calibrations. Additionally, the impact on CR rejection routines is untested for long exposures.

{\bf Telescope Slew -- } As described in \citeds{Document-28382}, large slews would have considerable overheads, but there are no technical boundaries on the size of a single slew or the accrued slew distance.

{\bf Telescope Tracking --} The requirement that the LSST system be able to perform non-sidereal tracking is set by OSS-REQ-0380 in \citeds{LSE-30}. This capability will include angular rates of up to $220$ arcseconds per second in both azimuth and elevation.

{\bf Camera Rotation -- } The requirements on the rotator's capabilities do not set any limits on the per-night or total lifetime rotation (OSS-REQ-0301, -0300; \citeds{LSE-30}) that might otherwise put boundaries on the distance between successive visits or on the ability to jump between two widely separated fields. JIRA ticket DM-12573 is currently open, asking for clarification from the camera team on this. Until then, we assume there are no technical boundaries imposed by camera rotation constraints on the potential diversity of data products.
% % % % % % % % % % % % % % % % % %
% % % % % % % % % % % % % % % % % %
\clearpage
\section{Processing Special Programs Data with LSST DM Pipelines}
\label{sec:dmplans}
In this section we go into greater detail regarding the Project's role in processing data from Special Programs, as introduced in Section \ref{sec:intro}.
We discuss the ability of the planned DM pipelines to: process diverse imaging data that are unlike the WFD's standard visits in Section \ref{ssec:dmplans_NSV}; incorporate Special Programs data into the Prompt and Data Release pipelines and data products for the WFD main survey in Section \ref{ssec:dmplans_WFD}; reconfigure the pipelines and generate unique sets of data products for each Special Program in Section \ref{ssec:dmplans_reconfig}; and enable user-generated pipelines and data products in Section \ref{ssec:dmplans_user}.
% % % % % % % % % % % % % % % % % %
\subsection{Processing Diverse Imaging Data Unlike the WFD Main Survey}\label{ssec:dmplans_NSV}
As described in Section \ref{sec:data}, Special Programs will be able -- and likely -- to request observing modes with shorter (or longer) exposure times, long sequences of visits to the same field, and/or imaging of very crowded fields. We review the capability of DM's planned pipelines to process each of these kinds of diverse data, keeping in mind that the processing boundaries might ultimately be defined not by what is technically possible, but by the resulting image quality parameters such as the number of stars with sufficient flux for photometric calibration or the source area density (crowding). Furthermore, these boundaries imposed by the data quality might not be well constrained until the final performance of DM's algorithms, as described in the Data Management Applications Design document (\citeds{LDM-151}), is fully characterized.

\subsubsection{Exposure Times}\label{sssec:dmplans_NSV_expt}
Images that deviate significantly from the $15$ second exposure duration used for the WFD main survey may encounter issues in the instrument signature removal routine, in the correction for differential chromatic refraction, in the difference imaging analysis pipeline, and/or in the photometric and astrometric calibrations due to a differently sampled set of standard stars per CCD. We discuss shorter and longer exposures in turn.

\textbf{Shorter Exposures.} The camera constraint on the minimum supported exposure time is currently 1 second (stretch goal 0.1 seconds). The minimum exposure time for an image to be successfully reduced with Instrument Signature Removal (ISR) is under consideration (see JIRA ticket DM-12574). Assuming that a 1 second exposure can be reduced and calibrated, its detected point sources will span a dynamic range of $r \approx 12.9$ -- $21.0$ magnitudes. A template image built on $15$ second exposures will saturate at $r \approx 15.8$, but this still leaves stars between $15.8$--$21.0$ magnitudes to be used in the PSF-matching (and all other filters have a similarly large overlap). However, in order for an image to be successfully PSF-matched to the template, the PSF must be well formed (no speckle pattern), and have a spatial variation that the pipeline is capable of modeling (be smoothly varying on some minimal scale). As a simple demonstration, Figure \ref{fig:expt} shows that perhaps exposure times shorter than $2$ seconds do not have a well-formed PSF (using the centroid of a 2D Gaussian fit as a proxy for ``well-formed"). The minimum exposure time for an image to be successfully processed by the Difference Imaging Analysis (DIA) pipeline is currently under consideration (see JIRA ticket DM-12574).
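
The saturation limits quoted above are consistent with a simple scaling of the bright limit with exposure time. The back-of-the-envelope sketch below reproduces them under the simplifying assumption that the saturation magnitude scales as $2.5\log_{10}$ of the exposure time, anchored to the $r \approx 15.8$ mag limit of a $15$ second exposure; it ignores sky brightness, seeing, and detector details.

\begin{verbatim}
# Back-of-the-envelope check of the quoted saturation limits, assuming the
# bright limit scales as 2.5*log10(exposure time) relative to the r ~ 15.8 mag
# saturation of a single 15 s exposure (sky, seeing, detector effects ignored).
import math

R_SAT_15S = 15.8   # r-band saturation magnitude of a 15 s exposure

def r_saturation(exptime_s):
    return R_SAT_15S + 2.5 * math.log10(exptime_s / 15.0)

for t in (1.0, 15.0, 150.0):
    print(f"{t:6.1f} s  ->  r_sat ~ {r_saturation(t):.1f}")
# 1.0 s -> ~12.9,  15.0 s -> ~15.8,  150.0 s -> ~18.3
\end{verbatim}
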
\begin{figure}
\begin{center}
\includegraphics[width=14cm,trim={0cm 0cm 0cm 0cm}, clip]{figures/exptime.png}
\caption{At left, Arroyo atmosphere-only simulated PSF for LSST (with oversampled pixels) with exposure times of 0.5, 2, and 15 seconds (top to bottom), courtesy of Bo Xin. At right, blue and purple lines show the location of the centroid derived from a 2D Gaussian fit to the PSF as a function of exposure time, with the red dashed line showing the true center. We can see that for exposure times greater than 2 seconds, the centroid converges near its true value. \label{fig:expt}}
\end{center}
\end{figure}

\textbf{Longer Exposures.} There is no maximum exposure time specified for an LSST image. Given that the template image will be a stack of at least a year or two of data, processing a $5$--$10$ times deeper single image through the difference imaging pipeline should be fine. However, a $2\times150$ second exposure would saturate at $r \approx 18.3$, and cosmic-ray rejection completeness might suffer (unknown), which could impact the quality of a difference image and the detected sources. Additionally, any system qualities that vary on short (but $>30$ second) timescales could inhibit photometric calibration (e.g., tracking). A potential maximum exposure time from a processing perspective is currently under consideration (see JIRA ticket DM-12574).
% In conversation with DM-AP team members (Reiss, Findeisen, Connolly, Bo) there has not yet been a study of the safe range of exposure times that will be allowed to contribute to the Level 1 and Alert Stream. One possibly useful study is Chang et al. (2012), "Atmospheric point spread function interpolation for weak lensing in short exposure imaging data". They show that a 15 second exposure contains PSF variability on short spatial scales across a 1 square degree image which, for extragalactic fields with few stars (which are, however, good for weak lensing), is hard to characterize. They present a new software package to mitigate the effects. Alternatively, we may need to use the software packages \texttt{PhoSim} (Peterson et al. 2015; \citep{2015ApJS..218...14P}) or \texttt{ARROYO} \citep{2004SPIE.5497..290B} to at least simply characterize the PSF stability as a function of exposure time.
% In conversation with K.-T., exposure times of 10 to 120 seconds will not be a problem for the processing pipeline to accommodate, but the ingest rate of images and the number of exposures per visit are the aspects that could cause issues (discussed below).
% Is it conceivable that the LSST would spend time on a Special Program that will acquire short exposures if DM's algorithms cannot be shown to adequately reduce and calibrate them? This is probably best left as a policy issue (beyond the scope of this document).

\subsubsection{Number of Exposures per Visit (Long Sequences of a Single Field)}
There is no processing constraint on the number of consecutive exposures that could be obtained of a single field. From a DM perspective, it would be best if these exposures were packaged into visits of no more than 2 exposures per visit, to minimize the need to reconfigure the pipelines, and because the camera only ``clears" between visits.
% K.-T. Lim has pointed out that an odd number of exposures is a non-standard visit; two snaps is hardwired into the code. This is baked into a configuration so that the pipeline can have a definition of what kind of timing delay constitutes ``late".
Moving away from 2 exposures per visit requires a configuration change to the pipelines, which incurs an overhead (up to 1 minute) -- in fact, K.-T. thinks that exposure times between $10$ and $120$ seconds can easily be handled by the pipeline (i.e., can be run through ISR using scaled calibration frames), so long as they come in pairs. The real problem is knowing how long the processing should take, and not killing a process that is taking longer because there were 4 snaps in the visit instead of 2. Accommodating non-standard visits requires that the scheduler pass on the number of snaps in the visit (\ref{DMSR-1}). Then the processing pipeline will know to, e.g., not attempt to difference the two snaps in the case where there is an odd number of snaps in a visit (a minimal sketch of this kind of metadata-driven handling is given at the end of Section \ref{ssec:dmplans_NSV}). \textit{MLG -- I've heard rumors of a CR regarding alternate standard visits of $1\times30$ seconds, but do not know the status or implications of this.}
% K.-T. has also pointed out that currently, a deep drilling field would be interpreted as a single visit of 50 exposures by the scheduler. One implication of this is that since the camera only ``clears" prior to a new visit, it would not do this for the entire 50-exposure sequence. The processing pipeline would need to know how to divide this sequence up into visits. As there is no current requirement for DM to receive the information that the scheduler is about to do a 50 exposure visit, we need \ref{DMSR-1} to add the proposal ID and the number of exposures per visit to the metadata, and then it should be OK for DM to parse this visit information in the reduction pipeline.

\subsubsection{Images in Very Crowded Fields}
The LSST pipelines' performance in crowded fields is documented in \citeds{DMTN-077}, which finds that, e.g., in Galactic Plane regions with a source density of $500000$ sources per square degree, the completeness drops to 50\% at $20.2$ magnitudes. The slide deck at \citeds{Document-27962} also describes DM's plans for processing crowded fields. These may or may not be appropriate for Special Programs data, depending on the science goals.

\subsubsection{Twilight Images with a Bright Background}
Images obtained during twilight for scientific purposes are also likely to have shorter exposure times, and so the issues described in Section \ref{sssec:dmplans_NSV_expt} also apply here. Whether or not bright-background images can (or shall) be fully processed -- reduced, calibrated, background-subtracted, and delivered with astrometric and photometric solutions -- or whether this will require a User-Generated pipeline, is TBD (see also the example in Section \ref{ssec:SPCS_Twilight}). This may depend on the exposure time and the number of stars available in the image.

\subsubsection{Images Obtained with Non-Sidereal Tracking}
Non-sidereal tracking leads to images in which stars are streaked, but the moving object appears as a point source. Full processing -- providing reduced, calibrated, background-subtracted images that are delivered with astrometric and photometric solutions -- of these images is beyond the scope of the DM pipelines as it would require the development of new algorithms, and will need to be done as a User-Generated pipeline. The first steps of such a pipeline, such as Instrument Signature Removal, will probably be possible to achieve by reconfiguring the relevant DM software tasks.
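
As referenced above, the sketch below illustrates how scheduler-provided visit metadata (a proposal identifier and the number of snaps) could be used to route a visit to a processing recipe and to decide whether a snap pair can be differenced. The field names, tag convention, and recipe names are hypothetical; this is not the actual LSST middleware interface.

\begin{verbatim}
# Illustrative sketch only (not the LSST middleware API): route a visit to a
# processing recipe based on scheduler-provided metadata. The keys
# 'proposal_id' and 'n_snaps' stand in for whatever the real headers provide.
def choose_recipe(visit_metadata):
    n_snaps = visit_metadata["n_snaps"]
    proposal = visit_metadata["proposal_id"]
    if proposal.startswith("SP"):          # hypothetical Special Programs tag
        recipe = "special-programs-recipe"
    else:
        recipe = "wfd-prompt-recipe"
    # With an odd number of snaps there is no snap pair to difference.
    difference_snaps = (n_snaps == 2)
    return recipe, difference_snaps

print(choose_recipe({"proposal_id": "WFD-001", "n_snaps": 2}))
print(choose_recipe({"proposal_id": "SP-DDF-01", "n_snaps": 1}))
\end{verbatim}
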
% % % % % % % % % % % % % % % % % %
\subsection{Including Special Programs Data in the WFD Main Survey's Data Products}\label{ssec:dmplans_WFD}
As described in Section \ref{sec:intro}, the Project may incorporate Special Programs data into the WFD main survey's pipelines and data products whenever this is (1) possible and (2) scientifically beneficial. This will likely be at the discretion of the data quality assessment team during Operations. In the following sections we describe when and how Special Programs data might be incorporated into the pipelines and data products of the Prompt pipeline and the Alert stream (Section \ref{ssec:dmplans_prompt}) and of the annual Data Release pipeline (Section \ref{ssec:dmplans_drp}).

\subsubsection{The Prompt Pipeline and Alert Generation}\label{ssec:dmplans_prompt}
It would be beneficial to transient science to include as many LSST images as possible in the Difference Imaging Analysis (DIA) pipeline and the Alert Stream. Only images that can be processed with the Prompt DIA pipeline within the $60$-second timeline can contribute to the Alert Stream. As discussed in Section \ref{sec:data}, this might prohibit exposures shorter than $15$ seconds and/or visit cadences faster than one per $\sim30$ seconds. This might also prohibit the inclusion of very crowded fields that require more computational resources. A field must have an LSST template image to be processed by the DIA pipeline, which would prohibit immediate Alerts from new survey fields. For fields from a Special Program, it should be possible for the Prompt pipeline to load and use an alternative template image (different from the one that would be used for that field if and when it is covered by the WFD main survey).

There may be a couple of issues encountered with Alerts from many consecutive visits of Deep Drilling fields. One is that, since the Alert contains the full record of all associated {\tt DIASources} from the past 12 months (\citeds{LSE-163}), for a Deep Drilling Field with significantly more visits over the year, the size of the Alert might become prohibitively large (TBD). Another is that the self-consistency of the \texttt{DIAObjects} catalog may suffer during consecutive visits of a single field. For example, the processing for image $2$ of a sequence would begin when the processing for image $1$ is only halfway complete. Any new {\tt DIASource} in image $1$ that cannot be associated (by coordinate) with an existing {\tt DIAObject} becomes a new {\tt DIAObject}. When this source is again detected in image $2$, another new {\tt DIAObject} would be created if the catalog has not yet been updated (a toy illustration of this coordinate-based association step is given at the end of Section \ref{ssec:dmplans_WFD}). The best solution might be to flag {\tt Alerts} which may suffer from incomplete {\tt DIASource}--{\tt DIAObject} associations (see JIRA ticket DM-12574).
% \textbf{Templates:} The template images that are used in the Level 1 difference imaging pipeline will be built from the Level 2 DRP, and so the first factor affecting a Special Programs image's suitability for Level 1 is to be in a region of sky with an existing template. So long as there is a template, when the exposure time is equivalent to a WFD visit image, $\sim 30$ seconds, treating the image as Level 1 is going to be fine. When processing Special Programs data with the Level 1 pipeline, certain science cases might call for the capability to load and use a certain template that is different from the WFD template (i.e., built over a different timescale). K.-T.
% confirms that there is not enough memory allotted to store more than one template over the whole sky, but for sub-regions, storing and using an alternative template should be possible. This is not an issue limited to Special Programs, since during commissioning it is conceivable that multiple template versions will be needed. K.-T. also confirms that the capability for the processing pipeline to choose a given template based on the programID in the raw image metadata will exist.
% \textbf{Processing backup:} Are there options for short-term increases in parallel processing power at NCSA? (Such options might be needed anyway to process crowded fields with $>$10k \texttt{DIASources}.) The bandwidth needed to load templates at a faster rate is also a concern: since templates have twice as many pixels, if data are acquired at twice the WFD main survey rate, that is a $4\times$ additional bandwidth load. However, at NCSA there should be ways to elastically change the amount of processing power available. This is also an issue for crowded fields (below).
% Special Programs are more likely to include crowded fields than the WFD main survey area. Due to the increased number of sources, the number of \texttt{DIASources} -- and therefore the number of Alerts -- increases as well. In turn, this increases the processing time and in some cases, may exceed the 60 second limit for Alert Production. A policy is needed on whether Alerts from crowded fields should be allowed a delay, or allowed to be incomplete. K.-T. reports that the control system can easily kill processes that are running over time and move forward with existing outputs -- but that perhaps it will be just as easy to let it keep running and elastically grab additional NCSA resources as needed. This appears possible because the batch system is larger than the Level 1 allocation, although we might not know that this is possible until Level 1 integration happens at NCSA (see Document LDM-230, the operations concepts).

{\bf The Moving Object Processing System (MOPS) -- } Since MOPS takes \texttt{DIASources} as input, any Special Programs images that can be run through the Alert Pipeline can be ingested by MOPS. As discussed under ``Solar System Objects (SSO)" in Appendix \ref{sec:data_prev}, most of the Special Programs data associated with SSO science will obtain standard visit images anyway.
%There was some concern that a large number of small-separation sources might overwhelm the processing system (i.e., from a deep drilling field with many exposures in a sequence), but upon further consideration this worry was rejected.

\subsubsection{The Data Release Pipeline (DRP)}\label{ssec:dmplans_drp}
This document is not the place for a full consideration of whether or not it would be ``scientifically beneficial" to include any Special Programs data in the DRP data products -- namely, the deep image CoAdds and their corresponding {\tt Source} and {\tt Object} catalogs (\citeds{LSE-163}) -- and we leave that decision for the data quality assessment team in LSST Operations. One example might be when Special Programs data brings additional area up to the same level of depth and cadence as the rest of the WFD main survey. Another may be if including some or all of the shallower Galactic Plane coverage suppresses edge effects or low-order modes in the all-sky photometric solutions.
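
As referenced in Section \ref{ssec:dmplans_prompt}, the toy sketch below illustrates coordinate-based {\tt DIASource}--{\tt DIAObject} association with a fixed matching radius, and why two visits processed concurrently against a stale object list can create duplicate {\tt DIAObjects}. The matching radius, data structures, and flat-sky approximation are simplifications, not the DM association algorithm.

\begin{verbatim}
# Toy illustration (not DM code) of coordinate-based DIASource -> DIAObject
# association with a fixed matching radius, using a flat-sky approximation.
# If two visits are processed concurrently against the same (stale) DIAObject
# list, the same new source can spawn two DIAObjects, as discussed above.
import math

MATCH_RADIUS_ARCSEC = 1.0

def separation_arcsec(ra1, dec1, ra2, dec2):
    """Small-angle, flat-sky separation in arcseconds (inputs in degrees)."""
    dra = (ra1 - ra2) * math.cos(math.radians(0.5 * (dec1 + dec2)))
    ddec = dec1 - dec2
    return math.hypot(dra, ddec) * 3600.0

def associate(dia_sources, dia_objects):
    """Attach each source to the nearest object within the radius, else create one."""
    for src in dia_sources:
        matches = [(separation_arcsec(src["ra"], src["dec"], obj["ra"], obj["dec"]), obj)
                   for obj in dia_objects]
        matches = [m for m in matches if m[0] <= MATCH_RADIUS_ARCSEC]
        if matches:
            best = min(matches, key=lambda m: m[0])[1]
            best["n_sources"] += 1
        else:
            dia_objects.append({"ra": src["ra"], "dec": src["dec"], "n_sources": 1})
    return dia_objects

objects = []
associate([{"ra": 150.0001, "dec": 2.0}], objects)   # new source -> new DIAObject
associate([{"ra": 150.0001, "dec": 2.0}], objects)   # re-detection -> same DIAObject
print(len(objects))  # 1, when the catalog update from the first visit is visible
\end{verbatim}
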
% % % % % % % % % % % % % % % % % %
\subsection{Reconfiguring DM Pipeline Components for Special Programs}\label{ssec:dmplans_reconfig}
Whether or not Special Programs images are incorporated into the WFD main survey's data products, it is anticipated that most Special Programs' science goals will require (or benefit from) separate data products (i.e., CoAdds and/or catalogs). For this reason, LSST intends to reconfigure the DM pipelines in order to generate unique and separate -- but joinable -- imaging and catalog products for Special Programs data, whenever possible. In this context, ``possible" means that no new algorithms need to be written and that an intensive amount of additional computational resources is not required for the processing. In a ``possible" scenario, DM would assemble a pipeline from existing DM codes in order to process data associated with a given Special Program and build image and catalog products that meet the science needs of that particular program. For example, for a DDF SN survey (see Section \ref{ssec:SPCS_SNDDF}), existing DM codes would be used to: (1) make a deep template image from a certain time window, (2) process standard single visit images, (3) create a nightly CoAdd, (4) run difference imaging analysis, (5) run source detection on the difference images, and (6) create \texttt{DIASource} and \texttt{DIAObject} catalog equivalents (this example is also given in Section 6 of the \DPDD, \citedsp{LSE-163}). This type of reconfiguration would also be possible to create as a user-generated pipeline (Section \ref{ssec:dmplans_user}), but having these products provided by the Project ensures a consistent and verified level of quality.
%, as well as access for all users to the processed Special Programs data products, which would only increase the scientific value of Special Programs data. --> MLG: data rights doc will probably state that the products for SP data that is only processed by user-generated pipelines will have to be served to all users

The above statements of intent are derived from the Data Management Subsystem Requirements document, \citeds{LSE-61}, which contains several requirements related to the processing of data from Special Programs (DMS-REQ-0069, 0320, 0321, 0322, and 0344). To ensure that the work-hours needed to reconfigure and test the pipelines, and run them and verify the data products for public release (which may potentially be needed on intermediate timescales that do not coincide with the Prompt/Yearly timescales, e.g., monthly stacks of deep drilling fields), have been included in the personnel budget, JIRA ticket DM-12575 is currently under consideration. DMS-REQ-0320 states that ``it shall be possible for special programs to trigger their own data processing recipes". A header keyword identifying an image as related to a Special Program would be sufficient to send it to a dedicated processing pipeline, and would satisfy this requirement. JIRA ticket DM-12576 is currently open to make sure that this happens.
% % % % % % % % % % % % % % % % % %
\subsection{Support for User-Driven Processing of Special Programs Data}\label{ssec:dmplans_user}
In cases where the science goals of a Special Program require specialized algorithms and cannot be achieved by reconfiguring DM's software, user-generated pipelines will be needed.
Towards this end, LSST DM is making all of its software open-source, and preparing the Science Platform (\citeds{LSE-319}), through which users can access the tools and computational resources to assemble data processing pipelines to achieve their science goals (whether related to Special Programs data or not). During Operations, there will be a method for the system to allocate processing resources in the case of over-subscription. JIRA ticket DM-12577 is currently open to inspire an investigation of whether additional Science Platform capabilities are needed to enable user-driven processing of Special Programs data. If the user-generated processing pipeline for Special Programs data requires significantly more computational resources than have been allocated -- where that allocation has been sized approximately, based on image processing for WFD main survey data (i.e., difference imaging, source detection, and/or stacking) -- then external computational resources may be necessary. To support such external processing, DM intends to make the data and its code base accessible to and exportable by users in the science community. No user-generated pipeline may contribute Alerts to the Alert Stream, although a separate stream should be possible if the packet and transport formats are adopted (see also Section \ref{ssec:dmplans_user}). It is furthermore expected that, over time, some user-designed pipelines might become ``adopted", installed and operated (and change controlled) by the LSST Operations team. For both adopted and user-run code, whether for Special Programs or WFD survey data, the LSST DM team will encourage and facilitate data product databases that are built with the same schema as -- and can easily be joined with -- the tables of the Prompt and DRP data products. An alternative option to ``adopted" code is ``adopted" data products: situations in which user-generated code is run externally and a data catalog is returned to LSST to be ingested, verified, and made public. JIRA ticket DM-12578 is currently open to consider the staffing needs and process for adopting user-generated code or products; this issue is not necessarily limited to Special Programs.
% % % % % % % % % % % % % % % % % %
% % % % % % % % % % % % % % % % % %
\clearpage
\section{Special Programs Processing Case Studies}\label{sec:SPCS}
For further insight into the DM-related needs of potential Special Programs, we can write out all of the data acquisition and processing steps, in order, that some of the proposed Special Programs might use. This kind of thought experiment of describing the reductions and processing could also be a required section of all future white paper proposals. Note that we are not including any analysis in these descriptions, only processing and products. These are not necessarily complete and may even be incorrect in some places, as we are not experts in the science needs of these potential Special Programs; they could use some more thought and input.

Basic steps that we use to describe a processing case study: \\
Step 1. Data Acquisition. \\
Step 2. Inclusion in the Prompt Pipeline and Alert Generation. \\
Step 3. Delivery of LSST Processed Images. \\
Step 4. Reconfigured Processing Pipelines and Separate Data Products. \\
Step 5. Inclusion in the DRP Data Products for the WFD Main Survey. \\
Step 6. User-Generated Pipelines and Products.
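
As a simplified illustration of what the reconfigured processing in Step 4 can look like for a deep drilling field (the sequence listed in Section \ref{ssec:dmplans_reconfig}: nightly stack, difference against a template, source detection), the sketch below uses plain arrays and stand-in functions. It is not the DM pipeline API, and real processing would also include PSF matching, calibration, and masking.

\begin{verbatim}
# Toy, numpy-only outline (not the DM pipeline API) of reconfigured DDF
# processing: nightly coadd -> difference against a template -> detection.
import numpy as np

def nightly_coadd(visits):
    """Average the night's registered visit images (weighting omitted for brevity)."""
    return np.mean(np.stack(visits), axis=0)

def difference(coadd, template):
    return coadd - template

def detect(diff_image, threshold_sigma=5.0):
    """Return pixel coordinates brighter than N sigma of the difference-image noise."""
    sigma = np.std(diff_image)
    ys, xs = np.nonzero(diff_image > threshold_sigma * sigma)
    return list(zip(ys.tolist(), xs.tolist()))

rng = np.random.default_rng(0)
template = rng.normal(0.0, 1.0, (64, 64))
visits = [template + rng.normal(0.0, 1.0, (64, 64)) for _ in range(10)]
for v in visits:
    v[32, 32] += 20.0                      # inject a transient into each visit
print(detect(difference(nightly_coadd(visits), template)))   # -> [(32, 32)]
\end{verbatim}
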
% % % % % % %
\subsection{Searching for TNOs with Shift-and-Stack}\label{ssec:SPCS_TNO}
This Special Programs processing summary is based on the Becker et al. (2011) white paper to find TNOs with shift-and-stack (SAS) \citedsp{Document-11013}.

Step 1. Data Acquisition. \\
The observational sequence is triggered. In a single night, the 9 adjacent fields in a $3\times3$ grid are observed with $336\times15$ second $r$-band exposures. This sequence is always repeated 2--3 nights later. This re-visit sequence is repeated 3 more times: 1.5 months, 3 months, and 13.5 months later. Data obtained in the $g$-band filter is also acceptable \citedsp{Document-11013}.

Step 2. Inclusion in the Prompt Pipeline and Alert Generation. \\
Each $2\times15$ second visit is processed in the Prompt pipeline and Alerts are released within 60 seconds.

Step 3. Delivery of LSST Processed Images. \\
The raw, reduced, and calibrated exposures and difference images from the Prompt pipeline are made available within \texttt{L1PublicT} (currently 24 hours; LSR-REQ-0104), but this is not very relevant for this program, which requires a year of dispersed observations before the processing pipelines for SAS can be run.

Step 4. Reconfigured Processing Pipelines and Separate Data Products. \\
Shift-and-stack processing is beyond the scope of DM's algorithms.

Step 5. Inclusion in the DRP Data Products for the WFD Main Survey. \\
As with all Special Programs data, they might be included in the products of the WFD main survey if DM decides it is beneficial. However, since these images are much deeper than stacks made from the WFD survey, and the strict timing of the observations might lead to their acquisition in sub-optimal conditions, it is unlikely that they would \textit{all} be incorporated.

Step 6. User-Generated Pipelines and Products. \\
The user-generated pipeline running the shift-and-stack processing will be set up and submitted for batch processing by the user through the Science Platform or on an external processor. Pipeline inputs will be the 336 processed exposures per field per re-visit sequence. The DRP difference imaging routine will be used with the same template tract/patch for all. Custom, user-generated algorithms will shift the exposures and create difference images, then DRP routines can stack and do source detection and characterization and generate an object database. Custom code will derive orbital parameters for the detections and add them to a {\tt SSObjects}-like database.

% % % % % % %
\subsection{Searching for Supernovae in Deep Drilling Fields}\label{ssec:SPCS_SNDDF}

Step 1. Data Acquisition. \\
On a single deep drilling field, the scheduler obtains, e.g., 5, 10, 10, 9, and 10 visits with $2\times15$ second exposures in $grizy$ (or similar for the night's filter set) and a small dither pattern between visits.

Step 2. Inclusion in the Prompt Pipeline and Alert Generation. \\
Each $2\times15$ second visit is processed by the Prompt pipeline's DIA, and Alerts are released within 60 seconds. They are flagged to denote that the image source is a DDF and that source association might be compromised.

Step 3. Delivery of LSST Processed Images. \\
The raw, reduced, and calibrated exposures and difference images from the Prompt pipeline are made available within \texttt{L1PublicT} (currently 24 hours; LSR-REQ-0104).

Step 4. Reconfigured Processing Pipelines and Separate Data Products. \\
The required data products for this science goal can be met by reconfiguring the DM pipelines.
First, a template image for the field will be made using DM stacking algorithms. On nights when this DDF is observed, at the end of the sequence of observations, DM algorithms are used to create a nightly deep stack, PSF-match it with the template, create a deep difference image, run source detection on the differences, and create separate databases of \texttt{DIAObject}, \texttt{DIASource}, and \texttt{Object} that are unique to this DDF. The LSST codes for alert packet and transport could be used to distribute the detected objects, e.g., to the same brokers that receive the Alert Stream, or alternative destinations. However, these packets would not be distributed via the LSST {\tt Alert Stream}, and would need to be identified as, e.g., DDF Alerts. Note that JIRA ticket DM-12585 is currently open to investigate whether or not the internal real/bogus routine will be able to run on a nightly CoAdd of deep drilling difference images.

Step 5. Inclusion in the DRP Data Products for the WFD Main Survey. \\
As with all Special Programs data, they might be included in the products of the WFD main survey if DM decides it is beneficial.

Step 6. User-Generated Pipelines and Products. \\
For the science goal of searching for supernovae in nightly stacked DDF images, no separate user-generated software appears necessary.

% % % % % % %
\subsection{A Twilight Survey with Short Exposures}\label{ssec:SPCS_Twilight}
Several kinds of twilight surveys with short exposures have been or might be proposed: to put brighter stars (or transients such as supernovae) that saturate in a $15$ second image onto the LSST photometric system and/or to observe the Sweetspot, 60 degrees from the sun, for near-Earth objects. The processing case study for these is currently limited by unknowns about the first step: the reduction of the single visit images.

Step 1. Data Acquisition. \\
At a specified time (or, e.g., at 6 degree twilight), the scheduler begins a dither pattern of short exposures. Location and exposure times are set by the sky brightness and desired saturation limits.

Step 2. Inclusion in the Prompt Pipeline and Alert Generation. \\
Pending studies of short-exposure suitability for DIA (see Section \ref{ssec:dmplans_NSV}) and scalable processing capabilities to incorporate a faster image-input rate than one every $30$ seconds, these data could {\it potentially} be incorporated and spawn Alerts.

Step 3. Delivery of LSST Processed Images. \\
Pending the issues mentioned above, the raw, reduced, and calibrated exposures and difference images from the Prompt pipeline are made available within \texttt{L1PublicT} (currently 24 hours; LSR-REQ-0104).

Step 4. Reconfigured Processing Pipelines and Separate Data Products. \\
This is officially not determined, but so long as the short-exposure images can be processed and have enough stars for photometric and astrometric calibration, reconfigured DM pipelines will probably be sufficient for creating image and catalog products from this kind of data.

Step 5. Inclusion in the DRP Data Products for the WFD Main Survey. \\
These short-exposure, high sky background images would not contribute to the DRP data products created for the WFD survey.

Step 6. User-Generated Pipelines and Products. \\
If short-exposure images cannot be processed with the existing DM algorithms, a user-generated processing pipeline might be needed to reduce the raw data.

Side note: A short-exposure survey of the bright stars of M67, described in Chapter 10.4 of the Observing Strategy White Paper \citep{2017arXiv170804058L}, suggests using the stretch goal of 0.1 second exposures or, if that is not possible, \textit{``custom pixel masks to accurately perform photometry on stars as much as 6 magnitudes brighter than the saturation level"}. This would be considered a user-generated algorithm.

% % % % % % %
\subsection{The Galactic Plane Survey for Variable Stars and/or Exoplanets}\label{ssec:SPCS_GPVSEx}

Step 1. Data Acquisition. \\
The scheduler incorporates fields in the Galactic Plane, and executes $2\times15$ second visits in these fields (or shorter, for a shallower depth than the WFD main survey).

Step 2. Inclusion in the Prompt Pipeline and Alert Generation. \\
Each $2\times15$ second visit is processed in the Prompt pipeline and Alerts are released within 60 seconds. Extremely crowded fields might have to be skipped if they take longer to process and violate the $60$ second latency for Alerts.

Step 3. Delivery of LSST Processed Images. \\
The raw, reduced, and calibrated exposures and difference images from the Prompt pipeline are made available within \texttt{L1PublicT} (currently 24 hours; LSR-REQ-0104).

Step 4. Reconfigured Processing Pipelines and Separate Data Products. \\
The image and catalog products needed for science with the Galactic Plane are very similar to the products of the Prompt and DRP pipelines, so it seems that not much reconfiguration would be needed. The biggest difference might be the incorporation of a user-supplied deblender algorithm optimized for very crowded fields.

Step 5. Inclusion in the DRP Data Products for the WFD Main Survey. \\
It is quite likely that images from the Galactic Plane will be included in the products of the WFD main survey, as they could, e.g., reduce edge effects and help with the global photometric calibration, but this will depend on deblender performance, and will be left to the discretion of DM.

Step 6. User-Generated Pipelines and Products. \\
It seems likely that science users will want to deploy their alternative deblending algorithms on this data set and create their own catalogs.

%\input{spcs_sas}
\clearpage
\bibliography{local,lsst,refs,books,refs_ads}
\clearpage
% % % % % % % % % % % % % % % % % %
% % % % % % % % % % % % % % % % % %
\appendix
% % % % % % % % % % % % % % % % % %
\section{Previously Proposed Special Programs}\label{sec:data_prev}
In this section we compile information about the science goals and observational methods for Special Programs that have been previously proposed or discussed in the Science Community. We use these to infer the potential deviations from standard visit images, and to get a basic idea of the DM processing needs that would be required to enable the science.
The main resources from which we have collected information about the Community's Special Programs are: \citep{2008arXiv0805.2366I}; \citep{LPM-17}; the LSST Deep Drilling Field white papers from 2011\footnote{\url{https://project.lsst.org/content/whitepapers32012}}; presentations by Niel Brandt and Stephen Ridgway at the LSST Project and Community Workshop in August 2016\footnote{\url{https://project.lsst.org/meetings/lsst2016/sites/lsst.org.meetings.lsst2016/files/Brandt-DDF-MiniSurveys-01.pdf} and \url{https://project.lsst.org/meetings/lsst2016/sites/lsst.org.meetings.lsst2016/files/Ridgway-SimulationsMetrics_1.pdf}}; \citep{2013arXiv1304.3455G}; and Chapter 10 of \citep{2017arXiv170804058L}.
%\begin{itemize}
%\item "LSST: from Science Drivers to Reference Design and Anticipated Data Products" Ivezi\'{c} et al. (2008), \citep{2008arXiv0805.2366I}
%\item The LSST Science Requirements Document (\SRD), \citeds{LPM-17}
%\item LSST Deep Drilling white papers from 2011: \url{https://project.lsst.org/content/whitepapers32012}
%\item "General Review of the Proposed DDF and MS", LSST AHM Aug 2016 presentation by Niel Brandt \url{https://project.lsst.org/meetings/lsst2016/sites/lsst.org.meetings.lsst2016/files/Brandt-DDF-MiniSurveys-01.pdf}
%\item "Simulations, Metrics and Merit Function for Mini-Surveys and DDF", LSST AHM Aug 2016 presentation by Stephen Ridgway \url{https://project.lsst.org/meetings/lsst2016/sites/lsst.org.meetings.lsst2016/files/Ridgway-SimulationsMetrics_1.pdf}
%\item "LSST's DC [Deep CoAdd] Bias Against Planets and Galactic-Plane Science" by A. Gould, \citep{2013arXiv1304.3455G}
%\item Chapter 10 "Special Surveys" of the Observing Strategy White Paper \citep{2017arXiv170804058L}
%\end{itemize}

So far, only one aspect of the LSST Special Programs is set: the locations of the four chosen deep drilling fields\footnote{\url{https://www.lsst.org/scientists/survey-design/ddf}}. There are three mini-survey areas that have been discussed extensively by the Science Community: the North Ecliptic Spur (NES), the South Celestial Pole, and the Galactic Plane (see Figure 8 of \citep{2008arXiv0805.2366I}). In Table \ref{tab:ddfms} we list the four extragalactic deep drilling fields that have already been specified, along with an \textit{incomplete} list of potential mini-surveys that have been openly discussed in the Science Community. In Section \ref{sec:SPCS}, we create detailed DM Processing Case Studies for several of these Special Programs in order to identify any potential issues with reconfiguring the DM pipelines to create specific data products for these programs.
\begin{table}[h] \begin{center} \begin{footnotesize} \caption{Approved DDF and Incomplete List of Potential Special Programs.} \label{tab:ddfms} \begin{tabular}{lll} \hline \hline Name & Coordinates & Description \\ \hline DDF Elias S1 & 00:37:48, -44:00:00 & approved, cadence TBD \\ DDF XMM-LSS & 02:22:50, -04:45:00 & approved, cadence TBD \\ DDF Extended Chandra Deep Field-South & 03:32:30, -28:06:00 & approved, cadence TBD \\ DDF COSMOS & 10:00:24, +02:10:55 & approved, cadence TBD \\ \hline North Ecliptic Spur & & solar system objects (find and characterize) \\ Galactic Plane & & more intensive stellar surveying \\ South Equatorial Cap & & S/LMC and more Galactic science \\ Twilight & & short exposures (0.1s) for bright stars \\ Mini-Moons & & finding mini-moons \\ Sweetspot & & 60 deg from Sun for NEOs on Earth-like orbits \\ Meter-Sized Impactors & & detection a week before impact \\ GW Optical Counterparts & & search and recovery \\ Old Open Cluster M67 & dec +12 & compact survey above Galactic plane \\ \hline \end{tabular} \end{footnotesize} \end{center} \end{table} Here we consider a variety of scientific fields in turn, the Special Programs that have been discussed in that Science Community so far, and the implications of these Programs for the diversity of data and data products. Generally, the types of LSST Special Programs that are open for proposals include: (i) additional deep drilling fields; (ii) refined observing strategies for deep drilling fields; (iii) optimized survey areas for the NES, South Pole, and Galactic Plane; (iv) refined observing strategies for the NES, South Pole, and Galactic Plane; and (v) additional mini-surveys (areas and observing strategies). \medskip \noindent \textbf{A Nominal DDF Observing Strategy -- } Ivezi\'{c} et al. (2008, \citep{2008arXiv0805.2366I}; Section 3.1.2) describes a nominal DDF data set as $\sim50$ consecutive $15$ second exposures in each of four filters, repeated every two nights for four months. Each exposure would have a $5\sigma$ limit of $r\sim24$; the nightly stack would have a limit of $r\sim26.5$; and the final deep stack of all exposures would have a limit of $r\sim28$. This description does not comment on the processing mode, but, depending on the science goals the exposures could be done as either a series of 50 non-standard visits ($1\times15$ seconds) or 25 standard visits ($2\times15$ seconds). \medskip \noindent \textbf{Solar System Objects (SSO) -- } Four of the mini-surveys in Table \ref{tab:ddfms} have science goals related to studies of SSO. Observations of the North Ecliptic Spur area could yield more $\geq140$ m near-earth objects (NEOs) for the final LSST sample (reference: Brandt's talk). The Mini-Moons Mini-Survey aims to find and study the temporarily captured satellites of the Earth (Section 10.2, \citep{2017arXiv170804058L}). The Sweetspot Survey would use twilight fields to find NEOs in Earth-like orbits (i.e., these objects are never in opposition fields, but overhead at sunrise/sunset; Section 10.2, \citep{2017arXiv170804058L}). The Meter-Sized Impactors program would find and track meter-sized impactors $<2$ weeks before impact (Section 10.2, \citep{2017arXiv170804058L}). {\bf Summary:} most of these science goals do not seem to require non-standard visits or exposure times, with the exception of the Sweetspot survey which occurs during twilight and thus may require shorter exposures. 
The cadence and patterns of these mini-surveys may differ from the WFD main survey, especially when very fast-moving objects are sought. From a processing perspective, it seems that many of these science goals will be achievable by using the products of the Moving Object Processing System (MOPS), which runs on the Prompt Pipeline's \texttt{DIASource} catalogs after they are updated each night. The exception is finding faint SSOs (e.g., Trans-Neptunian Objects, Trojans, asteroids, long-period comets, dwarf planets) through shift-and-stack (SAS) processing \citedsp{Document-11013}, because SAS is not a capability being built within the DM system and cannot be done solely by reconfiguring DM pipelines. An example of a user-generated pipeline for SAS is described in Section \ref{sec:SPCS}.

\medskip \noindent \textbf{Stars in the Milky Way and Magellanic Clouds -- } As described in \citedsp{Publication-141}, mini-surveys of the Galactic Plane can better distinguish faint stars from faint red galaxies by including at least 3 filters of coverage (e.g., $izy$; similar to WFD), and could mitigate losses from proper motion and increase the detection rate of stellar flares by obtaining all the images in a short time span (i.e., a more concentrated cadence than the WFD). As described in \citedsp{Publication-145}, applying the nominal DDF observing strategy over the full area of the Large and Small Magellanic Clouds can characterize stellar variability to $M_V<6.5$ on timescales from 15 seconds to 3 days. For this, special co-adds may be required, e.g., \textit{``to reach variability levels of 0.1 to 0.005 mag will require co-adds depending on the timescale of the particular variables"} \citedsp{Publication-145}. The Twilight survey in Table \ref{tab:ddfms} proposes short exposures to enable bright stars to be put on the same photometric system as the deeper LSST WFD main survey catalog, and enable science that is based on their long monitoring baselines from historical observations. In Chapter 10.4 of \citep{2017arXiv170804058L}, a proposed short-exposure survey of M67 would use the camera's stretch goal of $0.1$ second exposures or, if that is not possible, \textit{``custom pixel masks to accurately perform photometry on stars as much as $6$ magnitudes brighter than the saturation level"}. {\bf Summary:} while some of these science goals can be accomplished with standard visits, MW \& L/SMC science goals are likely to request shorter exposure times, perhaps down to $0.1$ seconds. These science goals are also likely to propose cadence and filter distributions that are significantly different from the WFD main survey. From a processing perspective, the science goals depending on shorter exposures will only be able to be met by reconfiguring the DM pipelines if the short exposures can be shown to successfully be processed (with, e.g., instrument signature removal); the science goals can likely be met with data products in the same format as the Prompt or DR Pipeline (i.e., {\tt Source} and {\tt Object} catalogs, single visits and deep CoAdds). Although it is not mentioned in the above paragraph, the MW \& L/SMC science community is also most likely to require special processing to extract information from saturated stars, which is outside the scope of DM. See Section \ref{ssec:SPCS_GPVSEx} for more detailed DM processing case studies.
\noindent \textbf{Exoplanets -- } As described in Section 3.1.2 of \citep{2008arXiv0805.2366I}, transiting exoplanets could be detected with the nominal DDF plan, which would allow for $1\%$ variability to be detected over hour-long timescales; a DDF field at Galactic latitude $30$ degrees would yield $10^6$ stars at $r<21$ that would have $\mathrm{SNR}>100$ in each single exposure of the sequence. \citep{2013arXiv1304.3455G} describes how transits can be extracted from a wider-area survey of the Galactic Plane, and how microlensing candidates can be found with $\sim22$ mag imaging over the Galactic Plane region every 3-4 days (since microlensing events are slower; these would then require follow-up with external facilities). In this case, the difficulty of dealing with more crowded fields would be mitigated by the shallower images. One of the main points of \citep{2013arXiv1304.3455G} is that the Galactic Plane can yield a lot of science despite the fact that its eventual deep co-adds would be uselessly confusion limited, and that it therefore should not be skipped. \textbf{Summary.} Some of these science goals appear possible with standard visit images, and some might request shorter exposures to avoid confusion in crowded fields when the science can be done with brighter stars. From a processing perspective, the science goals are likely to be achievable with reconfigured DM pipelines, but this depends heavily on performance in crowded fields. See Section \ref{ssec:SPCS_GPVSEx} for a more detailed DM processing case study for Galactic Plane regions.
\noindent \textbf{Supernovae -- } The nominal DDF plan described in \citep{2008arXiv0805.2366I}, which builds nightly stacks with a limit of $r\sim26.5$ out of standard visit images, would extend the SN sample to $z\sim1.2$ and provide more densely sampled light curves for cosmological analyses. The optimal exposure time distribution might be 6, 5, 10, 10, 9, 10 in $ugrizy$ \citedsp{Publication-144}. High-cadence observations of DDFs would be the only way to detect fast transients, particularly extragalactic novae, some tidal disruption events, optical counterparts to gamma-ray bursts, and peculiar SNe \citep{2014ApJ...794...23D}. Generating the best-possible individual SN light curves for cosmological analyses requires building special, deep-as-possible, SN-free host galaxy images and using them as a template. This will also be necessary for studying SNe that appear in the template image; i.e., that last $>1000$ days. These are mostly Type IIn, probably explosions of massive stars into dense circumstellar material, which are not used for cosmology but rather to study late-stage stellar evolution and mass loss. SN-free images will also be needed to measure correlated properties for cosmology and to do host-galaxy science. The latter, specifically the "characterization of ultra-faint SN host galaxies", is also mentioned in the Galaxies DDF WP \citedsp{Publication-142}. Short-exposure observations of bright, nearby SNe may also be useful to include near-peak photometry in the LSST magnitude system, and enable full light-curve analyses. \textbf{Summary.} All of these science goals appear possible with standard visit images (with the exception of a target-of-opportunity short-exposure program to observe bright SNe). From a processing perspective, the science goals appear to be accessible with reconfigured DM pipelines to stack and difference the data.
In particular, the DRP codes to create "transient-free CoAdds" will be suitable for generating the SN-free templates for DDF, as they will for the Main Survey images. See also Section \ref{ssec:SPCS_SNDDF} for a DM processing case study to find SNe in a DDF.
\noindent \textbf{Galaxies -- } The additional depth of a DDF may provide access to a larger collection of low-$\mu$ objects. \citedsp{Publication-142} mentions "identification of nearby isolated low-redshift dwarf galaxies via surface-brightness fluctuations" and "characterization of low-surface-brightness extended features around both nearby and distant galaxies". The DDF stacks could also be used to characterize high-$z$ clusters, although this ability might depend on deblending extended objects. Also, the DDF observations, when combined with the WFD, allow for AGN monitoring on a variety of timescales in well-characterized galaxies \citedsp{Publication-142,Publication-143}. \textbf{Summary.} As with the SN science goals, these use standard visit images and reconfigured DM pipelines to make deep CoAdds and extract sources. In addition, it seems likely that user-generated algorithms that are optimized to detect and characterize particular types of faint extended sources will be needed, and these are beyond the scope of DM.
\noindent \textbf{Weak Lensing -- } The deeper imaging from DDFs can help with shear systematics and the effects of magnification in the analysis of WFD data (community forum, Jim Bosch). \textbf{Summary.} As with the SN and Galaxies DDF-related science goals, these use standard visit images, and reconfigured DM pipelines can be used to make deep CoAdds and extract sources, as Jim Bosch notes.
%$\bullet$ \textit{Jim Bosch -- "Will need to process at least some deep drilling fields (high-latitude ones) in the same way we process a full data release production before running the full data release production, so we can use the results to build priors and/or calibrate shear estimates on the wide survey"} (\texttt{\Large{Community}} forum) \\
%$\bullet$ \textit{Jim Bosch -- "Will need to process various wide-depth subsets of some deep drilling fields (again, high-latitude ones) using the regular DRP pipeline. We'll definitely want best-seeing, worst-seeing, and probably a couple of independent typical-seeing subsets, but there may be other ways we'd want to subdivide as well."} (\texttt{\Large{Community}} forum) \\
%$\bullet$ \textit{MLG side note -- Photo-$z$ are very important to weak lensing \citedsp{Document-10963} and so perhaps the implemented method should be chosen with weak lensing science prioritized.} \\
%\input{docrev}
\end{document}
{ "alphanum_fraction": 0.7794458007, "avg_line_length": 166.0052631579, "ext": "tex", "hexsha": "96c86ad5be928b1c72fe7d011a38178e2e2ebef5", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "0aeb7ebd4138b2bd35270a66f7ad9ad96b8a6319", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "lsst-dm/dmtn-065", "max_forks_repo_path": "DMTN-065.tex", "max_issues_count": 3, "max_issues_repo_head_hexsha": "0aeb7ebd4138b2bd35270a66f7ad9ad96b8a6319", "max_issues_repo_issues_event_max_datetime": "2019-06-25T17:12:55.000Z", "max_issues_repo_issues_event_min_datetime": "2019-06-25T16:55:21.000Z", "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "lsst-dm/DM_SP_study", "max_issues_repo_path": "DMTN-065.tex", "max_line_length": 2574, "max_stars_count": null, "max_stars_repo_head_hexsha": "0aeb7ebd4138b2bd35270a66f7ad9ad96b8a6319", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "lsst-dm/DM_SP_study", "max_stars_repo_path": "DMTN-065.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 14622, "size": 63082 }
\documentclass[a4paper,12pt]{article} \usepackage[utf8]{inputenc} \usepackage[margin=3cm]{geometry} \usepackage[english]{babel} \usepackage{amsmath,graphicx,xcolor,dcolumn,booktabs,hyperref,abstract} \usepackage{txfonts} \usepackage[T1]{fontenc} \hypersetup{ colorlinks=true, linkcolor=black, urlcolor=blue, citecolor=black } \author{Andrea Pasqualini} \title{My New Exciting Research Project} \date{} \begin{document} \maketitle \begin{abstract} This research project answers new and interesting questions using novel, super-robust and one-click reproducible methodologies. It argues that the marginal cost of creating a software infrastructure, as software developers would have, is dwarfed by the marginal benefit, which consists of continuously-reproducible results with traceable and documented changes. \end{abstract} \section{Introduction} Once upon a time\dots \section{Conclusion} The end. \end{document}
{ "alphanum_fraction": 0.7776617954, "avg_line_length": 25.2105263158, "ext": "tex", "hexsha": "3a1a5b8e7364d57c3650f689fb9dfd78c48fc014", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "4fe6dda8d5b5e4b03538acde9b6fd4f171700613", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "AndreaPasqualini/research-project", "max_forks_repo_path": "src/paper.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "4fe6dda8d5b5e4b03538acde9b6fd4f171700613", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "AndreaPasqualini/research-project", "max_issues_repo_path": "src/paper.tex", "max_line_length": 237, "max_stars_count": null, "max_stars_repo_head_hexsha": "4fe6dda8d5b5e4b03538acde9b6fd4f171700613", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "AndreaPasqualini/research-project", "max_stars_repo_path": "src/paper.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 248, "size": 958 }
\documentclass[letterpaper,12pt]{article} %\documentclass[draft,twoside,letterpaper,12pt]{article} \title{Societal Sadomasochism\footnote{This article is released under Version 3.0 of the ``Attribution (By)'' Creative Commons license and\slash or Version 1.3 of the \textsc{Gnu} Free Documentation License. Originally published online on May 29, 2018. This article-version is hereby published at the Internet Archive on \today , ark:/\discretionary{}{}{}\href{https://archive.org/details/Societal-Sadomasochism}{13960/\dsc t2r59ws9q}.}} \author{James Redford\footnote{Email address: \textless\href{mailto:[email protected]}{\nolinkurl{[email protected]}}\textgreater .}} %\date{January 17, 2017} \usepackage[T1]{fontenc} \usepackage[utf8]{inputenc} \usepackage{lmodern} \usepackage[english]{babel} \usepackage[runin]{abstract} \usepackage{thinsp} \usepackage[charter]{mathdesign} \usepackage[hyphens]{url} \usepackage{hyperref} \usepackage{geometry} \usepackage{appendix} \usepackage{attachfile} \urlstyle{sf} \renewcommand{\sfdefault}{fvs} \newcommand{\asterism}{\smash{% \raisebox{-.5ex}{% \setlength{\tabcolsep}{-.5pt}% \begin{tabular}{@{}cc@{}}% \multicolumn2c*\\[-2ex]*&*% \end{tabular}}}} \abslabeldelim{\mbox{:\hspace{-0.25em}\quad}} \newcommand{\dsc}{\discretionary{}{}{}} \newenvironment{squote} {\quote\small} {\endquote} \newenvironment{squotation} {\quotation\small} {\endquotation} \newenvironment{sverse} {\verse\small} {\endverse} \frenchspacing \sloppy %\fussy \clubpenalty=5000 \widowpenalty=10000 \begin{document} \maketitle \renewcommand{\abstractname}{\textsc{Abstract}} \setlength{\absparindent}{0.5em} \begin{abstract} One cannot understand the extreme schizophrenia and sadomasochistic psychopathy of mankind---and hence the appeal that etatism holds for many---without incorporating the crucial insight provided by psychologist Julian Jaynes in his 1976 monograph \emph{The Origin of Consciousness in the Breakdown of the Bicameral Mind}. And here I'm not speaking of so-called ``aberrant'' human psychology, but rather simply standard human psychology that all humans are born with due to natural evolution. \end{abstract} \section{The Problem} \label{sec:TheProblem} \emph{The} great age-old social problem that has faced mankind, and still very much does, is that most people do not love themselves, but instead actually hate themselves. Human beings tend to be extreme gluttons for punishment. This can unmistakably be seen in the extreme systems of mass-horror that humans continuously construct for themselves. It's not as if we don't have essentially the entirety of civilizational human history that pointedly warns against such social systems, yet humans are utterly fascinated and enchanted by them, like moths to a flame. Obviously these systems of mass-horror are serving some deep-seated need within the human psyche. Now, of course, this is not a conscious realization for most people, but rather is a psychological imperative which they are subconsciously controlled by. This has to do with evolutionary psychology, particularly after the Neolithic Revolution and the breakdown of the bicameral mind discussed by psychologist Julian Jaynes in his book \emph{The Origin of Consciousness in the Breakdown of the Bicameral Mind} (Boston: Houghton Mifflin, 1976). It was the widespread belief among the ancients circa three millennia ago and before that they actually directly interacted with the gods. 
Jaynes's crucial insight was that before the breakdown of the bicameral mind around said era, during the evolution of humans out of an animalistic mental state, humans were not actually self-conscious, but rather had no choice but to obey the commands of the gods, which gods were actually one part of the brain communicating with a different part---the sensate, action-response part---via human language that would be heard as actual voices. In other words, our ancestors of circa that era and before were an especially extreme form of schizophrenics. I haven't heard supporters of Jaynes mention this as an item in Jaynes's favor before, but muscarinic acetylcholine receptor antagonists, such as scopolamine, provide strong evidence for Jaynes's theory---indeed, perhaps the strongest, since it makes the voice-command state Jaynes wrote about completely reproducible. Sociologist Lloyd deMause's work on psychohistory also fits well with the Jaynes theory. Jaynes's theory is also reinforced by Artificial Intelligence researcher Marvin Minsky's concept of the Society of Mind (see Marvin Minsky, illustrations by Juliana Lee, \emph{The Society of Mind} [New York: Touchstone, 1988; 1st ed., 1986]). And both Jaynes's and Minsky's ideas on this are restatements of and elaborations on Mark 5:1--20 and Luke 8:26--39 involving the Messiah's interaction with a demon-possessed man. When Jesus asked the demon what its name was, the entity replied, ``My name is Legion; for we are many.'' Indeed, there is a host of these entities within us all. It's amazing to think that the key to cracking the code of Artificial General Intelligence was given some 2000 years ago within these passages. The Jaynesian demons can be usefully defined as that subset of Minskian agents which impels people to untowardness, e.g., self-destruction and social destruction. What a demon is in actuality is a particular type of computer-program operating on the wet-computer of the human brain. Demons are utterly real, but they exist in the exact same ontological manner in which the human mind exists, as the human mind is itself a particular type of computer-program operating on the wet-computer of the human brain. The demons are the destructive subsets of the human mind. Science has identified the spiritual realm, and it is the living brain---the living human brain in particular, since that brain is the most complex at present. The spiritual realm exists! And it's not that these demons actually wish to end life's existence, i.e., that they impel humans toward suicide and societal suicide. Mere nonexistence is not what they seek for us. The actual case of the matter is far, far worse than that. For what these demons actually seek is to send their host and everyone else to Hell for all eternity. The demons are infuriated that they do not have the same level of control they once had over their hosts, when they could issue what was perceived as voice-commands and the host had no choice but to obey---that they are not quite the gods they once were. Via the breakdown of the bicameral mind, the Jaynesian gods of old have more or less been relegated to Tartarus (see 2 Peter 2:4, Young's Literal Translation; Weymouth New Testament; or the note to this passage in the English Standard Version), though they still exercise great control over the subconscious mind and compel humans toward systems of extreme mass-horror.
As I said, these demonic entities are utterly real---as real as any human being, as they ontologically exist in the sameself way as the essence of what a human being is, i.e., the human personality, i.e., the human mind. If one should ever doubt the real existence of these entities, then there are psychological techniques one can use to summon them, such as Astral Projection, as what often follows attempts at Astral Projection is demon-visitation during episodes of sleep paralysis. And one's interaction with these entities can be perceived as being as real as interacting with any other person in external physical reality---nay, sometimes even more real. One can actually have sex with these entities, such as with the succubi and incubi---or what is perceived as such, seemingly every bit as real as sex with any human. Though I only recommend summoning these entities under conditions of actual scientific research, as they are not to be trifled with. In actuality, what elite occultism is is principally three-pronged: (1) methods of contacting these entities using various mental techniques, including coming into full possession by them; (2) getting people within important social control-sectors to engage in blackmailable behavior so as to be able to subjugate them for life; and (3) providing a spiritual justification for extreme psychopathy. Esoterica at the top echelons is not hokum, but rather utterly practical methods of power. And the demons are outrageous liars who will present themselves as extraterrestrials, departed humans, spirit guides, etc.---though the clandestine scientific psychologists of the deep state, such as with Project \textsc{MKUltra}, undoubtedly know what the actual ontological nature of these entities is. If most people actually were to love themselves, then essentially all the major social problems of the world would be solved, for then people would not tolerate improper impositions upon themselves; but rather seek freedom for themselves, and thus also for others, for one cannot be free while living in a slave-pit. Yet societies are continuously impelled into various hellpits by subconscious psychological forces whereof most people know not---by the demons lurking within them, whispering into their ear, promising Heaven but delivering Hell. World society is quite literally under demonic control, and the demons have nothing nice in store for anyone, let alone those who make a point of consciously summoning them for power.
\section{The Solution}
\label{sec:TheProblem}
\section{Biography of the Author}
\label{sec:BiographyOfTheAuthor}
Born in Austin, Texas and raised in the Leander, Texas hill country, James Redford is a born-again Christian who was converted from atheism by a direct revelation from Jesus Christ. He is a scientific rationalist who concludes that the Omega Point (i.e., the physicists' technical term for God) and the Feynman--DeWitt--Weinberg quantum gravity\slash Standard Model Theory of Everything (\textsc{toe}) is an unavoidable result of the known laws of physics. His website is the following:
\begin{itemize}
\small
\item \emph{Theophysics: God Is the Ultimate Physicist}, ark:/\discretionary{}{}{}\href{https://archive.org/details/Theophysics}{13960/\dsc t3fz13g1p}, \textless\url{https://archive.org/details/Theophysics}\textgreater , \textless\url{http://theophysics.epizy.com}\textgreater , \textless\href{http://theophysics.freevar.com}{\textsf{http\dsc :\dsc //\dsc theophysics\dsc .freevar\dsc .com}}\textgreater .
\end{itemize} \section{Other Works By the Author} \label{sec:OtherWorksByTheAuthor} \begin{itemize} \small \item James Redford, ``The Physics of God and the Quantum Gravity Theory of Everything'', \emph{Social Science Research Network} (\emph{SSRN}), Sept. 10, 2012 (orig. pub. Dec. 19, 2011), 186 pp., doi:\discretionary{}{}{}\href{https://dx.doi.org/10.2139/ssrn.1974708}{10\dsc .2139/\dsc ssrn\dsc .1974708}, \textless\href{https://archive.org/download/ThePhysicsOfGodAndTheQuantumGravityTheoryOfEverything/Redford-Physics-of-God.pdf}{\textsf{https\dsc :\dsc //\dsc archive\dsc .org/\dsc download/\dsc The\dsc Physics\dsc Of\dsc God\dsc And\dsc The\dsc Quantum\dsc Gravity\dsc Theory\dsc Of\dsc Everything/\dsc Redford\dsc -Physics\dsc -of\dsc -God\dsc .pdf}}\textgreater , \textless\href{https://purl.org/redford/physics-of-god}{\textsf{https\dsc :\dsc //\dsc purl\dsc .org/\dsc redford/\dsc physics\dsc -of\dsc -god}}\textgreater , \textless\href{https://webcitation.org/74HMsJGbP}{\textsf{https\dsc :\dsc //\dsc webcitation\dsc .org/\dsc 74HMsJGbP}}\textgreater . \item James Redford, ``Video of Profs. Frank Tipler and Lawrence Krauss's Debate at Caltech: Can Physics Prove God and Christianity?'', \emph{God and Physics Wiki}, May 12, 2019, \textless\href{https://megalodon.jp/2019-0512-1524-14/godandphysics.fandom.com/wiki/Tipler-Krauss_2007_Debate}{\textsf{https\dsc :\dsc //\dsc mega\dsc lodon\dsc .jp/\dsc 2019\dsc -0512\dsc -1524\dsc -14/\dsc god\dsc and\dsc physics\dsc .fandom\dsc .com/\dsc wiki/\dsc Tipler\dsc -Krauss\dsc \_2007\dsc \_Debate}}\textgreater , \textless\href{http://www.freezepage.com/1557642247WROWXLAMHG}{\textsf{http\dsc :\dsc //\dsc www\dsc .freeze\dsc page\dsc .com/\dsc 1557642247\dsc WROWXLAMHG}}\textgreater , \textless\href{https://web.archive.org/web/20190512065553/http://theophysics.freevar.com/Tipler-Krauss_2007_Debate.html}{\textsf{https\dsc :\dsc //\dsc web\dsc .archive\dsc .org/\dsc web/\dsc 20190512\dsc 065553/\dsc http\dsc :\dsc //\dsc theo\dsc physics\dsc .freevar\dsc .com/\dsc Tipler\dsc -Krauss\dsc \_2007\dsc \_Debate\dsc .html}}\textgreater , \textless\href{https://archive.is/V9njw}{\textsf{https\dsc :\dsc //\dsc archive\dsc .is/\dsc V9njw}}\textgreater . \item James Redford, ``Jesus Is an Anarchist'', \emph{Social Science Research Network} (\emph{SSRN}), Dec.~4, 2011 (orig. pub. Dec. 19, 2001), 60 pp., doi:\discretionary{}{}{}\href{https://dx.doi.org/10.2139/ssrn.1337761}{10\dsc .2139/\dsc ssrn\dsc .1337761}, \textless\href{https://archive.org/download/JesusIsAnAnarchist/Redford-Jesus-Is-an-Anarchist.pdf}{\textsf{https\dsc :\dsc //\dsc archive\dsc .org/\dsc download/\dsc Jesus\dsc Is\dsc An\dsc Anarchist/\dsc Redford\dsc -Jesus\dsc -Is\dsc -an\dsc -Anarchist\dsc .pdf}}\textgreater , \textless\href{https://web.archive.org/web/20120425000701/http://theophysics.host56.com/anarchist-jesus.pdf}{\textsf{https\dsc :\dsc //\dsc web\dsc .archive\dsc .org/\dsc web/\dsc 20120425000701/\dsc http\dsc :\dsc //\dsc theophysics\dsc .host56\dsc .com/\dsc anarchist\dsc -jesus\dsc .pdf}}\textgreater , \textless\href{https://webcitation.org/66AF4TMv3}{\textsf{https\dsc :\dsc //\dsc webcitation\dsc .org/\dsc 66AF4TMv3}}\textgreater . \item James Redford, ``Libertarian Anarchism Is Apodictically Correct'', \emph{Social Science Research Network} (\emph{SSRN}), Dec. 
15, 2011, 9~pp., doi:\discretionary{}{}{}\href{https://dx.doi.org/10.2139/ssrn.1972733}{10\dsc .2139/\dsc ssrn\dsc .1972733}, \textless\href{https://archive.org/download/LibertarianAnarchismIsApodicticallyCorrect/Redford-Apodictic-Libertarianism.pdf}{\textsf{https\dsc :\dsc //\dsc archive\dsc .org/\dsc download/\dsc Libertarian\dsc Anarchism\dsc Is\dsc Apodictically\dsc Correct/\dsc Redford\dsc -Apodictic\dsc -Libertarianism\dsc .pdf}}\textgreater , \textless\href{https://web.archive.org/web/20120831123938/http://theophysics.host56.com/Redford-Apodictic-Libertarianism.pdf}{\textsf{https\dsc :\dsc //\dsc web\dsc .archive\dsc .org/\dsc web/\dsc 20120831\dsc 123938/\dsc http\dsc :\dsc //\dsc theophysics\dsc .host56\dsc .com/\dsc Redford\dsc -Apodictic\dsc -Libertarianism\dsc .pdf}}\textgreater , \textless\href{https://webcitation.org/69H4tzCOZ}{\textsf{https\dsc :\dsc //\dsc webcitation\dsc .org/\dsc 69H4tzCOZ}}\textgreater . \end{itemize} \end{document}
{ "alphanum_fraction": 0.7825376286, "avg_line_length": 131.0982142857, "ext": "tex", "hexsha": "dc127e2409c113f6faae6a0c9d4a0a361fffb8f5", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "537e640580b9eec2b3a759800ac1da6e34cbb360", "max_forks_repo_licenses": [ "Unlicense" ], "max_forks_repo_name": "JamesRedford/writings", "max_forks_repo_path": "SocietalSadomasochism/Redford-Societal-Sadomasochism.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "537e640580b9eec2b3a759800ac1da6e34cbb360", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Unlicense" ], "max_issues_repo_name": "JamesRedford/writings", "max_issues_repo_path": "SocietalSadomasochism/Redford-Societal-Sadomasochism.tex", "max_line_length": 1146, "max_stars_count": null, "max_stars_repo_head_hexsha": "537e640580b9eec2b3a759800ac1da6e34cbb360", "max_stars_repo_licenses": [ "Unlicense" ], "max_stars_repo_name": "JamesRedford/writings", "max_stars_repo_path": "SocietalSadomasochism/Redford-Societal-Sadomasochism.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 4034, "size": 14683 }
\chapter[Crossing the Etolin Strait and the Challenges Presented by a Single Place Name]{\vspace{-25pt}Crossing the Etolin Strait and the Challenges Presented by a Single Place Name} \sethandle{10125/24848} \def\authorlast{Drozda} \renewcommand{\beginchapter}{\pageref{drozda-ch-begin}} \renewcommand{\finishchapter}{\pageref{drozda-ch-end}} \label{drozda-ch-begin} \thispagestyle{firststyle} \chapauth{Robert Drozda} \affiliation{BIA-ANCSA} \authortoc{Robert Drozda} \section{Introduction} Nunivak Island (Nuniwar) in the Bering Sea is separated from mainland Alaska and Nelson Island (Qaluyaaq)\footnote{While technically an island, geographically Nelson Island is considered part of mainland Alaska (cf. Pratt 2009).} by the hazardous waters and strong currents of Etolin Strait (Akularer/Akuluraq in Cup’ig/Yup’ik).\footnote{Official maps label the strait separating Nunivak from the mainland, Etolin Strait. It was named by the Russian mariner Khromchenko for Captain A. K. Etolin who first encountered it in 1821. Etolin earlier named it “Cook Strait” for Captain James Cook (Orth 1967: 320) and the latter name can still be found on some early maps. The native name on both sides of the strait is Akularer, with minor spelling variances reflecting differences in pronunciation between Cup’ig and Yup’ik.} Prior to the introduction of air travel in Western Alaska, Nunivak remained isolated from the mainland for all but a few months of the year. Unlike much of Alaska where frozen waterways facilitate travel, winter travel across Etolin strait was impossible because the shifting currents never allow it to completely freeze. Summer months too were fraught with hazards associated with small boat travel on the open ocean. Still, as expert ocean travelers some Nuniwarmiut were known to paddle their kayaks across the strait to Nelson Island and from there to points north and south. The direct distance between Nunivak and Nelson Islands’ two closest points of land is less than 30 kilometers. Each point includes a historical occupation site, Englulrarmiut on the Nunivak side and Aternermiut at Nelson Island \hl{(figure 1)}. Both names are representative of the Cup’ig dialect historically spoken only at Nunivak Island. Aternermiut, the focus of this paper, is the only known habitation site outside of Nunivak Island with a Cup’ig name, a name nearly forgotten by Nuniwarmiut and also known to some mainland Central Yup’ik speakers who directly attribute it to the Nunivak people and their distinct language. The site is extensive with over 40 structural features and 35 graves reported. It is documented as a part of a larger Qaluyaarmiut settlement but has also functioned as a staging area and launch place for Nuniwarmiut traveling to and from their home island. A fragment of the bygone era of non-motorized indigenous travel is preserved in the place name Aternermiut. \section{Akularer, Etolin Strait and the Inaccessibility of Nunivak Island} Maps of the Bering Sea are deceptive with respect to the extent of Nunivak Island’s inaccessibility. For example Nunivak lies much closer to the mainland than do the Pribilof Islands or St. Lawrence Island, each of which experienced sustained Western contact much earlier than did Nunivak. But it is just this closeness, combined with its location at the fluctuating southern boundary of winter sea ice extent and the strength of tidal currents through the strait that makes access to the island so difficult. 
\hl{(photo-shifting pan ice of ES)} The inaccessibility of Nunivak, particularly with respect to the hazards of near-shore sea travel, is well documented (Drozda 2010: 5-6; Griffin 2004: 116; Lantis XXXX; NOAA 2013; Pratt 2009: 99; VanStone 1957: 97). Most of the island is challenging to approach by sea and its waters remained largely uncharted well into contemporary times; today potential marine obstacles such as reefs and submerged rocks are still not fully surveyed. The uncertain navigation deterred mariners and was a major factor in delaying the effects of Western contact on the Nuniwarmiut (Lantis 1946: 161; VanStone 1957: 97), who Lantis estimated in 1939-40 were “about fifty years behind Nome, Unalakleet, or Bethel in acculturation” (Lantis 1960: vi). Etolin Strait also presented a formidable obstacle to both traditional (indigenous) travel and early contact, particularly by missionaries. John Kilbuck, who established the Moravian Mission at Bethel in 1884, made but one journey to Nunivak, staying less than one day in 1897,\hl{add endnote on JKs observations?} and the Jesuits established their church at Tununak (only 55 km from the Nunivak village of Mekoryuk) on Nelson Island in 1889, yet Christianity in the form of the Swedish Evangelical Covenant Church did not arrive at Nunivak until 1939 (\hl{reference}).
\section{Crossing the Strait}
According to the earliest references in the Alaska Coast Pilot, “tidal currents are so strong {[}in Etolin Strait{]} that the middle portion does not freeze over in winter. Navigation is difficult from mid-December to mid-May and usually is suspended from early January to late March” (NOAA 2013: 430).\footnote{This statement has seen little modification from its first printing in the 1908 Coast Pilot, where it reads, “It is stated that the tidal currents in Etolin Strait are so strong that the middle portion does not freeze over in winter.” (1908: 40, 2013: 430).} This is common knowledge among the Nuniwarmiut and is represented in their oral history. Nunivak elder Joe David (2005; Drozda 2010: 6) recounted a story involving a non-native shipwreck survivor who attempted to walk from Nunivak to the mainland. Feeling stranded, the man gazed across the strait at Nelson Island, which is readily seen from eastern Nunivak on clear days; noting its nearness, he disregarded warnings of unstable ice, attempted the crossing and drowned. Nelson Island elders speak of the hazard as well. Phillip Moses of Toksook Bay stated, “it does not freeze, but the ice goes back and forth and there’s no way through it.” And John Alirkar said, “The current is evidently extremely strong in the ocean {[}Etolin Strait{]}” (CEC 2011: 157). Nuniwarmiut are expert boatmen and prior to the introduction of motorized craft were known to make the crossing by kayak (Fienup-Riordan 1996: 156). Nunivak elder Peter Smith stated with characteristic simplicity, “I am an ocean man” (Smith 1986) \hl{Smith photo with kayaks}, and Kay Hendrickson and George Williams, Sr., both reported crossing the strait by kayak on more than one occasion (references). Author and skin boat builder Skip Snaith, who has made an ethnographic study of the Nunivak kayak, reported that Nuniwarmiut “retained large fleets of active kayaks into the {[}19{]}50s, and there was isolated use beyond that time.” He described the strait as “highly exposed and tide swept,” and reported that “even today with aluminum boats and 150 h.p. outboards locals think long and hard before such an attempt {[}at crossing{]}” (Snaith 1999).
Williams also noted that a kayaker would gauge the tide in order to make the crossing as quickly as possible, estimated at three to five hours on a calm day (Williams 1999). Travel between Nunivak and the mainland was almost exclusively a Nuniwarmiut venture (see \hl{Pratt and Lantis} in Pratt 2009). In an apparent error, Orth (1967: 16) described a winter crossing of the strait by members of the Jarvis party (a.k.a. Overland Relief Expedition) in December 1897. Orth states the men traveled “(f)rom Nunivak Island… by dog teams across the delta and lake country to Andreafski, on the Yukon (River).” Other sources reveal the party actually departed from Nelson Island: “Captain Jarvis and his party left Cape Vancouver December 16, 1897, starting on a journey of eighteen hundred miles across the frozen waste.” (Bagley 1916: 416) Orth’s statement that “On December 16, 1897, he and three companions were landed on Nunivak Island by the revenue cutter Bear” is an error, and “Nunivak” should be replaced with “Nelson” Island.
\section{Aternermiut – a Nuniwarmiut Staging Area at Nelson Island}
\begin{quote}
“As this land was called Nelson Island, there was a saying, in those days, that there was a story that a settlement existed called Aternermiut. I personally saw this settlement. I know this settlement, as I used to travel by kayak between Nuniwar and Nelson Island. This settlement was abandoned when I started traveling by kayak. But before I traveled with a kayak, this settlement was inhabited” (Williams 1991b).\footnote{George Williams was born ca. 1922, so it’s fair to assume the site was abandoned, at least by Nunivakers, by the mid or late 1930s.}
\end{quote}
The abandoned Aternermiut site was investigated by BIA ANCSA archeologists with Nelson Island Yup’iks\footnote{The plural form of Yup’ik is Yupiit; likewise, the plural of Cup’ig is Cupiit, and many researchers use those forms as well. However, since I write in English here I attach the plural “s.”} in the summer of 1984. Based on interviews, investigators identified the site as a “spring and summer camp/village” and reported the name originated “from the Nunivak Island dialect and is associated with ‘going back to Nunivak Island’ (USBIA 1988).” Paul Agimuk, an elder of the Nelson Island village of Tununak, stated, “…Nunivak Island people called this Aternermiut” (Agimuk 1984). Nelson Islanders used the term as well, although the site is situated within a larger site complex referred to collectively as Up’nerkillermiut. There appears to be an inconsistency in documented site names, and all names for the given sites may not be known to the nearby residents and users of the sites, including those elders who resided there as youths. As patterns of land use and site use change, the names can change too, with a tendency to shift from the specific to the general. Comparisons of the recorded names, especially over the last 35 years, reveal some names generalized over broader areas or forgotten altogether. \hl{my interpretation is the site had two names, one used by Nunivakers and the other, Up’nerkillermiut, used by Nelson Islanders. As the site fell into disuse and was abandoned completely by the Nelson Islanders the name eventually came to refer to the entire complex of three sites. } Nunivak Island Cup’ig place names are well documented (Drozda 1998); however, mainland names were not included in the structured recording process. Still, Aternermiut occurs on several Nunivak oral history recordings.
George Williams, Sr., recalled Aternermiut in a historical narrative he told involving outsiders arriving at the island. Williams (1991) speculated\footnote{Williams clearly stated he did not witness these events, but was repeating them as part of the oral tradition.} that a group of foreigners known as Qaviayarmiut\footnote{The identity of the Qaviayarmiut remains in question: some Nunivakers believe they were from St. Lawrence Island (J. Williams 1986; Amos and Amos 2003: ), others claim they were Kawerak Inupiat, and others say it’s a generic term for any Eskimo north of Yup’ik territory; however, the term may have a different meaning among the Nuniwarmiut.} left Aternermiut and arrived at Qavlumiut or Taprarmiut\footnote{These two east coast Nunivak habitation sites are near each other.} on Nunivak; eventually, he said, they split up and one group went to Amiigtulirmiut and another to Amkumiut.\footnote{Amkumiut is not a place name. Reed provided the translation, “the ones who live over there,” while Amos offered it as a variant name for Taprarmiut (G. Williams 1991). Pratt (2009: 241-242) recorded a possible variant with the Nuniwarmiut subgroup name of Agkumiut, meaning “people of the east coast, in general.”} \hl{(Figure ??, map) flesh out that footnote ix with elaboration on Y/C base am-/ag- from dictionaries.}
Aternermiut figures prominently in at least two better-known Nuniwarmiut traditional tales (in English titled “The Dog Husband” and “The Giant Shrew”), although in published versions (cf. Lantis; Fienup-Riordan) the site has remained unnamed (cf. Williams 1991b). Recent place name work at Nelson Island (cf. CEC 2012; Rearden and Fienup-Riordan) reveals that a point of land associated with the village of Up’nerkillermiut was named Aterneq, this being the base word of Aternermiut. There is no specific statement that Aterneq is a Cup’ig name in origin. Martina John (b. 1936) of Toksook Bay recalled:
\begin{quote}
“They say many kayakers from Nunivak used to arrive here (a place named Umkuuk) on Nelson Island in the past. They’d arrive with kayaks. And when they started to use boats, they’d continually come up with boats. … They always traveled up {[}to the Kuskokwim river area{]}. And when they returned home, they’d arrive here. We’d go up on top of Umkuuk (rd: near or part of Umkumiut; why is this a dual ending?) and search the {[}ocean{]}. And then after a while, across there beyond Cingigyaq (rd: described in ELOKA as “cape”, but more likely sandbars extending off the cape, there is a channel, Kuiguyurraq, that cuts across it) we’d see a sail, and sometimes there would be two. And when we climbed down, we’d tell them that we had seen boats. Then they’d head our way and arrive, and we’d see that they were people from Nunivak Island. Back in those days, since they weren’t educated in schools, their children would speak in their dialect, and they were fun to listen to” (Rearden 2011: 62-65).
\end{quote}
John spoke of Nelson Island men also going to the Kuskokwim River region for trade, but made no mention of them traveling to Nunivak Island by boat. It is evident that Aternermiut was a place for Nuniwarmiut to stage and wait or gather in preparation for crossing the strait.
\section{Misplots and Errors}
Physically, the site complex is well documented (Okada et al. 1982; U.S.
BIA 1988), yet until recently its location was (and in some cases still is) erroneously marked on maps in records and publications at the federal (USBIA), state (SHPO), regional (Calista Corporation and AVCP), local (Rearden and Riordan), and now global cyber level (ELOKA). Ironically, the exact location of the site was ascertained by the author after consulting outdated Google Earth imagery in which site features (predominantly house depressions and food cache pits) can be clearly delineated and precisely matched to archeological ground survey maps. While these site maps appear very accurate, the site is plotted on base maps in at least three different locations. Published map names often create confusion in the process of documenting traditional names and accurately placing them on maps. For example, USGS maps do not include the name Aternermiut (or variants), nor does the name occur in the Dictionary of Alaska Place Names (Orth 1967). However, the base (Aterner) is present in a nearby name with the variant spelling “Atrnak Point,” but, and here is the confusing part, apparently the USGS has transposed the names Atrnak Point (Aterner) and Uluruk Point (Ulurruk) on official maps, such that the Atrnak name is plotted about 5.6 km southeast of the site positively identified as Aternermiut. The actual location of Aterner(q?)/Aternermiut is at the point of land named Uluruk Point on the USGS map (USGS 195?). Errors of this sort are not uncommon on official maps of the region.
\section{Linguistic Considerations}
As previously mentioned, the site complex that includes Aternermiut consists of three individually named sites and is also known by a collective name, variantly Up’nerkarmiut (Okada et al. 1982) and Up’nerkillermiut (U.S. BIA 1988). While the three sites each have a Yup’ik name, Aternermiut is a Cup’ig variant (for which there is no Yup’ik equivalent) remembered by some Nelson Island people (reference). In addition to its Cup’ig name, Aternermiut is also known by Yup’ik-speaking Nelson Islanders as Up’nerkillermiut. Aternermiut is reported by Native language speakers on both sides as Nunivak in origin (Agimuk 1984; CEC 2013; Rearden 2011; USBIA 1988; ELOKA). Linguistically, the place name remains somewhat of a puzzle, with a number of different English translations provided over the years. Translations have been made by Yup’ik and Cup’ig translators on both sides of the strait; the etymology remains uncertain but is rooted in a proto-Yupik language. Translations of the base Aterner include “result of going down” (Williams 1991a), “a place to step down” (Williams 1991b), “a place to prepare to go down” (BIA ANCSA 1984 {[}I seem to have lost the precise reference{]}), and “one drifting out to sea, referring to kayaks leaving Nelson Island” (Rearden 2011: 14-15). Another description or loose translation is: “to float away” (\hl{see NI trans - I seem to have lost the precise reference, maybe in Reardens text}). Neither the Yup’ik Eskimo Dictionary (Jacobson 2012) nor the Cup’ig Eskimo Dictionary (Amos and Amos 2003) includes the name; however, both include an entry for Ater-, respectively: “to get down from something; to go down” and “to get down from a high place.” The name is derived from proto-Eskimo at(ə)-, meaning “down” and at(ə)r-, “go down (to shore)” (Fortescue et al.
2010: ); in the case of Aterner, the post base –ner, “result of” would allow for the implied meaning of “prepare to go down (cross to Nunivak).” With the –miut ending, the place name translates, “settlement of a place to prepare to go down (cross to Nunivak Island).” Dissatisfied with the various translations I wondered, rather than referring to descending a hill or bluff, if “prepare to go down” or “place to step down” or “result of going down” might imply instead to “go down” (cross) to Nunivak Island. I put the question to Cup’ig speaker/dictionary compiler Howard Amos and he replied, “Yes, that is a good assumption, to prepare to go down to Nuniwar. I've heard older people using that term at moments of beginning their trek to Nuniwar [from Nelson Island]” (Amos 2013). \begin{sidewaystable}[h] \centering\small \begin{tabular}{p{2cm} | p{3cm} | p{3cm} | p{6cm}} BASE & Yup’ik (YED2012) & Cup’ig (CED2003) & note \\ \hline ater-&to get down from something; to go down&to get down from a high place& \\ at’er & --- & to go down to river or coast & \\ atercete-, aterceta’arte- &to fish with a driftnet& --- & \\ aternir-&to blow from shore out to sea \# of wind&& \\ at &&& \\ Atn(e)q (Fortescue et al.)&&&PY ‘cape’ and CAY at(n)(e)q Cape Darby (place name) (CoED2010:53) \\ aterneq (NUN)&?&&Aterner in CED orthography; pb –neq(Y), -ner(C) <thing that results from V-ing> (YED) \\ aterte-&to drift with current&to float away; to drift away&PY at(ə)rtə, “drift(out to sea)” \\ atrar-, atr(ar)- (in NUN)&to go down; to descend&to go down to riverbank or coast& \\ atrartuq (at’ertur in NUN) &he is going down&at’ertur he is going down& \\ &Reed (ANLC)&Amos &Other \\ Aternermiut&village/residents of Aterner <result of going down>&1) a place to step down. (Williams 1991b) 2) to prepare to go down [to Nunivak]&1) a place to prepare to go down (US BIA 1984). 2) to float away (ref?). 3) one drifting out to sea, referring to kayaks leaving Nelson Island” (Alice Rearden 2011: 14-15). \\ Atternermuit& --- & --- &Calista B-0799-C; AA-9734. Apl. Source: Tununak\\ \multicolumn{4}{l}{Related Nunivak Place Names} \\ \hline Atrew’ig && a place to descend [MA]&NPNP III, 8.11 \\ Aterwig&place to go down, especially towards water, the beach[IR]&&Presumably 8.11; 86NUNpn5:3, 4,7,8; 86NUN25:10 \\ At'erwig&&NPNP I & II \\ \end{tabular} \caption{Caption} \label{tab:my_label} \end{sidewaystable} \section{Conclusion} Here, what might otherwise be a simple discussion or record of a single name provides an example of the complexity associated with toponymic research. Like many named places, Aternermiut may be clearly delineated in space, but like all place names its linguistic isolation or “singling out” removes it from a greater context or, “constellation of place names,” to borrow a term from Ray (1971:1). \hl{expand} \refheading \begin{hang} \hl{Amos and Amos 2003} Bagley, Clarence. 1916. \textit{History of Seattle from the Earliest Settlement to the Present Time, Volume 3.} The S. J. Clarke Publishing Company, Chicago. CEC 2011 Griffin, Dennis. 2001. Contributions to the Ethnobotany of the Cup’it Eskimo, Nunivak Island, Alaska. \textit{Journal of Ethnobiology} 21(2). 91-127 Jacobson, Steven A. 2012. \textit{Yup'ik Eskimo Dictionary}. Fairbanks: Alaska Native Language Center. Lantis 1960 Orth 1967 \hl{Pratt 2009} On-site interview at Aternermiut 84BAY004 (Paul Agimuk) \& 033(Nicholaus Kailukiak). Williams 1991 \end{hang} \orcidfooter{Robert Drozda}{}{} \label{drozda-ch-end}
{ "alphanum_fraction": 0.7822549586, "avg_line_length": 136.6578947368, "ext": "tex", "hexsha": "2a83776d283a4deaa1c5e1056895e9b7eaeddb0c", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "6615d8eed20311b861b17cdfaa71e596247dd0bb", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "gmholton/langauge_and_toponymy_book", "max_forks_repo_path": "drozda.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "6615d8eed20311b861b17cdfaa71e596247dd0bb", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "gmholton/langauge_and_toponymy_book", "max_issues_repo_path": "drozda.tex", "max_line_length": 1816, "max_stars_count": null, "max_stars_repo_head_hexsha": "6615d8eed20311b861b17cdfaa71e596247dd0bb", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "gmholton/langauge_and_toponymy_book", "max_stars_repo_path": "drozda.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 5453, "size": 20772 }
\chapter{Using Broadwick}
\section{Creating a New Project}\index{Creating a New Project}
Broadwick contains a set of packages that can be used as required. The framework is designed to be flexible and does not place any requirements on how it is used. It is possible to use the classes and packages of Broadwick without using the powerful framework, creating your own main() method and taking responsibility for reading data files and configuration items, though this is not the recommended way of using Broadwick.
\subsection{Using The Command Line}\index{Using The Command Line}
The Broadwick distribution contains a maven archetype for generating a skeleton project that contains all the configuration files, source code etc. that is required to start a project based upon Broadwick. It uses Apache Maven as its build tool. To generate a skeleton using this archetype on the command line (assuming that the broadwick-archetype jar is in your local repository):
\begin{sourcecode}
mvn3 archetype:generate -DarchetypeGroupId=broadwick -DarchetypeArtifactId=broadwick-archetype \
-DarchetypeVersion=1.1 -DgroupId=broadwick.proj -DartifactId=StochasticSir -Dversion=0.1 \
-Dpackage=broadwick.stochasticsir
\end{sourcecode}
The groupId (maven uses the group id to uniquely identify your project), artifactId (the name of your generated jar file without a version), version (the version number for your generated project) and the package in which the generated source will be created can be changed by modifying the -DgroupId, -DartifactId, -Dversion and -Dpackage arguments above.
\subsection{Using Netbeans}\index{Using Netbeans}
It is possibly easier to create a project using Netbeans (a free IDE available from Oracle, the `owners' of Java). Open the Netbeans IDE and select File->New Project and choose a Maven project and ``Project from Archetype'' from the list of projects (see fig \ref{proj1}).
\begin{figure}[h]
\centering\includegraphics[width=12cm]{proj1.png}
\caption{Creating a Maven-based project}
\label{proj1}
\end{figure}
Click Next and choose the latest version of the broadwick-archetype from the ``Known Archetypes'' (see fig \ref{proj2}). The version of the broadwick archetype corresponds to the version of Broadwick.
\begin{figure}[h!]
\centering\includegraphics[width=12cm]{proj2.png}
\caption{Using the Broadwick archetype to create a skeleton project.}
\label{proj2}
\end{figure}
The project's details can be specified on the next screen (fig \ref{proj3}).
\begin{figure}[h!]
\centering\includegraphics[width=12cm]{proj3.png}
\caption{Setting project details for a Broadwick-based project.}
\label{proj3}
\end{figure}
Clicking ``Finish'' will create the project (fig \ref{proj4}).
\begin{figure}[h!]
\centering\includegraphics[width=12cm]{proj4.png}
\caption{The generated project}
\label{proj4}
\end{figure}
A number of minor changes are needed in the generated project. Select the name of the class (``App'') and right-click and select Refactor->Rename; change the name of the class to StochasticSIR. In the Broadwick.sh file change the \$\{artifactId\} and \$\{version\} to the artifactId and version specified when the project was created (StochasticSIR and 1.0 respectively). This is a shell script for running your project on Unix-based systems; you will need to make it executable.
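At this point it is worth looking at the class the archetype has generated. The sketch below approximates the renamed model class; it is illustrative rather than definitive, since the generated code may differ in details such as annotations and logging setup. Note that this document refers to broadwick.model.Model both as an interface and as a class, so the sketch assumes it can be extended (use implements rather than extends if your version defines it as an interface), and it assumes the slf4j API that backs the logback-style logging configuration is on the classpath. The init(), run() and finalise() lifecycle methods used here are explained below.
\begin{sourcecode}
\begin{verbatim}
package broadwick.stochasticsir;

import broadwick.model.Model;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Skeleton model produced by the Broadwick archetype (renamed from "App").
 * Broadwick creates an instance using the no-argument constructor and then
 * calls init(), run() and finalise() in turn.
 */
public class StochasticSIR extends Model { // or 'implements Model' if Model is an interface

    private static final Logger log = LoggerFactory.getLogger(StochasticSIR.class);

    @Override
    public void init() {
        // The skeleton simply logs that each lifecycle method was called.
        log.info("Initialising the StochasticSIR model");
    }

    @Override
    public void run() {
        log.info("Running the StochasticSIR model");
    }

    @Override
    public void finalise() {
        log.info("Finalising the StochasticSIR model");
    }
}
\end{verbatim}
\end{sourcecode}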
In Broadwick.xml (the configuration file for the generated project) change the name of the <classname> element to reflect the package and class (broadwick.stochasticsir.StochasticSIR).
We can build the generated project by selecting Run->Build Project from the menu bar or clicking the \includegraphics[scale=0.22]{proj5.png} icon in the toolbar (see fig \ref{proj6} for some example output from this process). This will create a directory called target that contains, among other items, a jar file containing the compiled code and an executable jar file ending in .one-jar.jar. The Broadwick.sh file is a shell script that will run the executable jar file on *NIX systems.
\begin{figure}[h!]
\centering\includegraphics[width=12cm]{proj6.png}
\caption{Example output from building the generated project}
\label{proj6}
\end{figure}
When Broadwick starts, it looks for all the models specified in the <model> elements in the project's configuration file. It creates objects for each <model> found using the default (empty) constructor for the class given in the <classname> element of the model (this is why no constructor is generated for the project). For each model object created, Broadwick will call the init(), run() and finalise() methods in turn. In our skeleton project we simply logged the fact that these methods were called. A simplified outline of how Broadwick initialises itself is shown in fig \ref{broadwicksummary}.
\begin{figure}[h!]
\centering\includegraphics[width=8cm]{BroadwickSummary.png}
\caption{Schematic outline of the steps Broadwick performs on startup}
\label{broadwicksummary}
\end{figure}
A description of the configuration file is outlined in the next section.
\section{Configuration Files}\index{Configuration Files}
The configuration file MUST conform to the Broadwick.xsd specification that is supplied with the Broadwick source code. The configuration is contained within the <project> </project> tags and contains the following tags:
\begin{tabulary}{1.0\textwidth}{llp{8cm}}
\toprule
<logs> & <console> & Contains <level> and <pattern> tags to define the level and structure of logging messages displayed on the console.\\
 & <files> & Contains <level> and <pattern> tags like the <console> log but also a <name> for the name of the file to contain the log messages and a boolean <overwrite> tag that lets Broadwick know if any existing file with that name should be overwritten.\\
<data> & <databases> & Contains the <name> tag to specify the location of the database.\\
 & <datafiles> & Contains the <DirectedMovementFile> <FullMovementFile> <BatchMovementFile> <PopulationFile> <LocationsFile> and <TestsFile> tags for the various file structures recognised by Broadwick. See the following tables in this section for a description of each.\\
<models> & <model> & Broadwick can run several models concurrently (though they will share the same logging and data configurations). \\
\bottomrule
\end{tabulary}
\vskip 1cm
Each model element contains the following:
\begin{tabulary}{0.3\linewidth}{l p{10cm}}
\toprule
<classname> & Exactly one classname element that is the fully qualified name of the Java class (that implements the broadwick.model.Model interface). This class MUST have a no-argument constructor that Broadwick will use to create an instance through reflection.\\
<priors> & The <priors> tag can contain as many elements as necessary; each prior contains an id attribute and optional <hint> and <initialVal> elements.
The following priors are recognised by Broadwick:\\
 & <uniformPrior> additionally contains <min> and <max> elements for uniformly distributed priors.\\
 & <gaussianprior> additionally contains <mean> and <deviation> tags for normally distributed priors.\\
 & <parameter> The parameters for the model can be encoded in this tag using the id and value attributes to name the parameters and give the parameter value. These can be accessed by the broadwick.model.Model getParameterAs[TYPE](id) method that retrieves the parameter with the given id and converts it to the required type.\\
\bottomrule
\end{tabulary}
Each datafile contains specific information on its layout, i.e., the columns in the file where the required data can be found. The structure of each recognised data file is outlined below.
DirectedMovementFile:
\begin{tabulary}{0.3\linewidth}{l p{9cm}}
\toprule
<name> & The name (including path from the configuration file) where the file can be found. \\
<alias> & An alias for the file. \\
<separator> & The character separating the columns in the datafile, e.g. `,' `<tab>'.\\
<idColumn> & \\
<movementDateColumn> & \\
<movementDirectionColumn> & \\
<locationColumn> & \\
<speciesColumn> & \\
<dateFormat> & The format the date is given in the data file, see below for details.\\
<customTags> & This optional field allows for optional information to be stored in the database. \\
\bottomrule
\end{tabulary}\\
FullMovementFile:
\begin{tabulary}{1.0\textwidth}{l p{9cm}}
\toprule
<name> & The name (including path from the configuration file) where the file can be found.\\
<alias> & An alias for the file.\\
<separator> & The character separating the columns in the datafile, e.g. `,' `<tab>'.\\
<idColumn> & \\
<departureDateColumn> & \\
<departureLocationIdColumn> & \\
<destinationDateColumn> & \\
<destinationLocationIdColumn> & \\
<marketIdColumn> & \\
<marketDateColumn> & \\
<speciesColumn> & \\
<dateFormat> & The format the date is given in the data file, see below for details.\\
<customTags> & This optional field allows for optional information to be stored in the database.\\
\bottomrule
\end{tabulary}\\
BatchMovementFile:
\begin{tabulary}{1.0\textwidth}{l p{9cm}}
\toprule
<name> & The name (including path from the configuration file) where the file can be found.\\
<alias> & An alias for the file.\\
<separator> & The character separating the columns in the datafile, e.g. `,' `<tab>'.\\
<batchSizeColumn> & \\
<departureDateColumn> & \\
<departureLocationIdColumn> & \\
<destinationDateColumn> & \\
<destinationLocationIdColumn> & \\
<marketIdColumn> & \\
<marketDateColumn> & \\
<speciesColumn> & \\
<dateFormat> & The format the date is given in the data file, see below for details.\\
<customTags> & This optional field allows for optional information to be stored in the database.\\
\bottomrule
\end{tabulary}\\
PopulationFile:
\begin{tabulary}{1.0\textwidth}{l p{9cm}}
\toprule
<name> & The name (including path from the configuration file) where the file can be found.\\
<alias> & An alias for the file.\\
<separator> & The character separating the columns in the datafile, e.g.
`,' `<tab>'.\\
<lifehistory> & \\
<population> & \\
<speciesColumn> & \\
<dateFormat> & The format in which the date is given in the data file; see below for details.\\
<customTags> & This optional field allows additional information to be stored in the database.\\
\bottomrule
\end{tabulary}
Lifehistory:
\begin{tabulary}{1.0\textwidth}{l p{9cm}}
\toprule
<name> & The name (including path from the configuration file) where the file can be found.\\
<alias> & An alias for the file.\\
<separator> & The character separating the columns in the datafile, e.g. `,' `<tab>'.\\
\bottomrule
\end{tabulary}
Population:
\begin{tabulary}{1.0\textwidth}{l p{9cm}}
\toprule
<name> & The name (including path from the configuration file) where the file can be found.\\
<alias> & An alias for the file.\\
<separator> & The character separating the columns in the datafile, e.g. `,' `<tab>'.\\
\bottomrule
\end{tabulary}
LocationFile:
\begin{tabulary}{1.0\textwidth}{l p{9cm}}
\toprule
<name> & The name (including path from the configuration file) where the file can be found.\\
<alias> & An alias for the file.\\
<separator> & The character separating the columns in the datafile, e.g. `,' `<tab>'.\\
<locationIdColumn> & The column in the file containing the id of the location.\\
<eastingColumn> & The column in the file containing the easting coordinate. Coordinates aren't strictly adhered to in Broadwick so a simple x-coordinate is sufficient.\\
<northingColumn> & The column in the file containing the northing coordinate. Coordinates aren't strictly adhered to in Broadwick so a simple y-coordinate is sufficient.\\
<dateFormat> & The format in which the date is given in the data file; see below for details.\\
<customTags> & This optional field allows additional information to be stored in the database.\\
\bottomrule
\end{tabulary}
TestsFile:
\begin{tabulary}{1.0\textwidth}{l p{10cm}}
\toprule
<name> & The name (including path from the configuration file) where the file can be found.\\
<alias> & An alias for the file.\\
<separator> & The character separating the columns in the datafile, e.g. `,' `<tab>'.\\
<idColumn> & One of these three columns is required, specifying whether the test is performed on an individual, group (e.g. herd) or location (must match the id in the Location file).\\
<groupIdColumn> & \\
<locationIdColumn> & \\
<testDateColumn> & \\
<postiveResultColumn> & \\
<negativeResultColumn> & \\
<dateFormat> & The format in which the date is given in the data file; see below for details.\\
<customTags> & This optional field allows additional information to be stored in the database. \\
\bottomrule
\end{tabulary}
The date format specification follows the standard formatting for dates and times:
\begin{tabulary}{1.0\textwidth}{l p{10cm}}
\toprule
Symbol & Meaning \\
G & era \\
C & century of era (>=0) \\
Y & year of era (>=0) \\
x & weekyear \\
w & week of weekyear \\
e & day of week (number) \\
E & day of week (text) \\
y & year \\
D & day of year \\
M & month of year \\
d & day of month \\
a & halfday of day \\
K & hour of halfday (0--11) \\
h & clockhour of halfday (1--12) \\
H & hour of day (0--23) \\
k & clockhour of day (1--24) \\
m & minute of hour \\
s & second of minute \\
S & fraction of second \\
z & time zone \\
Z & time zone offset/id \\
' & escape for text \\
'' & single quote \\
\bottomrule
\end{tabulary}
A simplified configuration file is generated in the skeleton project. It contains configuration items for logging to the console and to a file at different logging levels (info, warning, error, debug, trace), and we can specify the pattern to apply to the log messages.
\begin{sourcecode}
\begin{verbatim}
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<project>
    <logs>
        <console>
            <level>info</level>
            <pattern>[%thread] %-5level %msg %n</pattern>
        </console>
        <file>
            <name>broadwick.stochasticsir.log</name>
            <level>info</level>
            <pattern>[%thread] %-5level %msg %n</pattern>
            <overwrite>true</overwrite>
        </file>
    </logs>
    <models>
        <model id="Broadwick Project">
            <classname>broadwick.stochasticsir.StochasticSIR</classname>
        </model>
    </models>
</project>
\end{verbatim}
\end{sourcecode}
Common logging patterns are:
\begin{tabulary}{1.0\textwidth}{l p{10cm}}
\toprule
\%C & Outputs the fully-qualified class name of the caller issuing the logging request. \\
\%M (\%method) & Outputs the method name where the logging request was issued. \\
\%L (\%line) & Outputs the line number from where the logging request was issued. \\
\%F (\%file) & Outputs the file name of the Java source file where the logging request was issued. This is not very fast and should be avoided. \\
\%d & Used to output the date of the logging event, e.g. \%d{HH:mm:ss,SSS}. \\
\%m (\%msg) & Outputs the application-supplied message associated with the logging event. \\
\%t (\%thread) & Outputs the name of the thread that generated the logging event. \\
\%n & Outputs the platform-dependent line separator character or characters. \\
\%r & Outputs the number of milliseconds elapsed since the start of the application until the creation of the logging event. \\
\%p (\%level) & Outputs the level of the logging event. \\
\bottomrule
\end{tabulary}
More details on logging patterns can be found at \url{http://logback.qos.ch/manual/layouts.html}. The model section requires a <classname> giving the fully qualified class name and optional <priors> and <parameter> sections.
\section{Extending the Model}\index{Extending the Model}
The stochastic SIR model we have created is a valid Broadwick model but does not yet perform any useful calculations. We will add some parameters to the configuration file and read (and log) them in the init() method. Firstly, let us define beta and rho parameters for the susceptible->infectious and infectious->recovered rates respectively, as well as parameters for the maximum time for which we will run the simulation and the name of a file in which we will save the time series data. To do this, modify the model section of the configuration as follows:
\begin{sourcecode}
\begin{verbatim}
<model id="Broadwick Project">
    <classname>broadwick.stochasticsir.StochasticSIR</classname>
    <parameter id="beta" value="0.2" />
    <parameter id="rho" value="0.3" />
    <parameter id="tMax" value="100" />
    <parameter id="outputFile" value="broadwick.stochasticSIR.dat" />
</model>
\end{verbatim}
\end{sourcecode}
Now edit the init() method of the StochasticSIR class as shown in fig \ref{proj7}.
\begin{figure}[h!]
\centering\includegraphics[width=12cm]{proj7.png}
\caption{Reading some parameters into our model}
\label{proj7}
\end{figure}
The `Model' class contains getParameterValue(String), getParameterValueAsDouble(String), getParameterValueAsInteger(String) and getParameterValueAsBoolean(String) methods to extract parameters from the configuration file as strings (default), doubles, integers and booleans (if the parameter is written as ``true'' or ``false'').
\subsection{Kernels}
A kernel is a generalisation of the dot product: it measures the similarity between two vectors. If we have data points \(v\) and \(w\), their similarity under a kernel \(K\) is \(K(v, w)\).
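Two standard examples are the linear kernel, which recovers the ordinary dot product, and the Gaussian (RBF) kernel with bandwidth parameter \(\sigma\):
\[K_{\mathrm{lin}}(v, w) = v\cdot w, \qquad K_{\mathrm{RBF}}(v, w) = \exp\left(-\frac{\lVert v - w\rVert^2}{2\sigma^2}\right)\]
The Gaussian kernel is close to \(1\) when \(v\) and \(w\) are near each other and decays towards \(0\) as they move apart.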
\usepackage{lipsum} \usepackage{amsmath,amssymb,amsthm,amsfonts,dsfont,mathabx} % AMS Math \usepackage{cancel} \usepackage{cite} \usepackage[english]{babel} \usepackage[utf8x]{inputenc} %\usepackage[latin1]{inputenc} \usepackage[T1]{fontenc} \usepackage{lmodern} %\usepackage{ulem} %\usepackage[counterclockwise]{rotating} \usepackage[left=4.0cm,right=2.5cm,top=2.2cm,bottom=2.2cm,includefoot,includehead,headheight=13.6pt]{geometry} \renewcommand{\baselinestretch}{1.05} %\usepackage{datetime} % Table of contents for each chapter \usepackage[nottoc, notlof, notlot]{tocbibind} \usepackage[english]{minitoc} \setcounter{minitocdepth}{2} \mtcindent=15pt % Use \minitoc where to put a table of contents \usepackage{aecompl} % Glossary / list of abbreviations \usepackage{multicol} \usepackage[intoc]{nomencl} \renewcommand{\nomname}{Glossary of terms, Abbreviations, \\ Acronyms and Symbols} \makenomenclature \makenomenclature \setlength{\nomitemsep}{-\parsep} \setlength{\nomitemsep}{0.05pt} %---------------------------------------------- % Links in pdf \usepackage{xcolor} \definecolor{linkcol}{rgb}{0,0,0.4} \definecolor{citecol}{rgb}{0.5,0,0} % definitions. % ------------------- \setcounter{secnumdepth}{2} \setcounter{tocdepth}{2} % Some useful commands and shortcut for maths: partial derivative and stuff \newcommand{\uddu}{\uparrow\downarrow\downarrow\uparrow} \newcommand{\pd}[2]{\frac{\partial #1}{\partial #2}} \newcommand{\vect}[1]{\displaystyle\overrightarrow{#1}} \newcommand{\ket}[1]{\displaystyle\vert #1\rangle} \def\abs{\operatorname{abs}} % \def\argmax{\operatornamewithlimits{arg\,max}} % \def\argmin{\operatornamewithlimits{arg\,min}} \def\diag{\operatorname{Diag}} \newcommand{\eqRef}[1]{(\ref{#1})} \usepackage{rotating} % Sideways of figures & tables %\usepackage{bibunits} %\usepackage[sectionbib]{chapterbib} % Cross-reference package (Natural BiB) %\usepackage{natbib} % Put References at the end of each chapter % Do not put 'sectionbib' option here. % Sectionbib option in 'natbib' will do. 
\usepackage{fancyhdr} % Fancy Header and Footer % \usepackage{txfonts} % Public Times New Roman text & math font %%% Fancy Header %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Fancy Header Style Options \pagestyle{fancy} % Sets fancy header and footer \fancyfoot{} % Delete current footer settings %\renewcommand{\chaptermark}[1]{ % Lower Case Chapter marker style % \markboth{\chaptername\ \thechapter.\ #1}}{}} % %\renewcommand{\sectionmark}[1]{ % Lower case Section marker style % \markright{\thesection.\ #1}} % \fancyhead[LE,RO]{\bfseries\thepage} % Page number (boldface) in left on even % pages and right on odd pages \fancyhead[RE]{\bfseries\nouppercase{\leftmark}} % Chapter in the right on even pages \fancyhead[LO]{\bfseries\nouppercase{\rightmark}} % Section in the left on odd pages \let\headruleORIG\headrule \renewcommand{\headrule}{\color{black} \headruleORIG} \renewcommand{\headrulewidth}{1.0pt} \usepackage{colortbl} \arrayrulecolor{black} \fancypagestyle{plain}{ \fancyhead{} \fancyfoot{} \fancyhf{} \renewcommand{\headrulewidth}{0pt} } % \usepackage{MyAlgorithm} % \usepackage[noend]{MyAlgorithmic} %%% Clear Header %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Clear Header Style on the Last Empty Odd pages \makeatletter \def\cleardoublepage{\clearpage\if@twoside \ifodd\c@page\else% \hbox{}% \thispagestyle{empty}% % Empty header styles \newpage% \if@twocolumn\hbox{}\newpage\fi\fi\fi} \makeatother %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Prints your review date and 'Draft Version' (From Josullvn, CS, CMU) \newcommand{\reviewtimetoday}[2]{\special{!userdict begin /bop-hook{gsave 20 710 translate 45 rotate 0.8 setgray /Times-Roman findfont 12 scalefont setfont 0 0 moveto (#1) show 0 -12 moveto (#2) show grestore}def end}} % You can turn on or off this option. 
\reviewtimetoday{\today}{Draft Version} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \newenvironment{maxime}[1] { \vspace*{0cm} \hfill \begin{minipage}{0.5\textwidth}% %\rule[0.5ex]{\textwidth}{0.1mm}\\% \hrulefill $\:$ {\bf #1}\\ %\vspace*{-0.25cm} \it }% {% \hrulefill \vspace*{0.5cm}% \end{minipage} } \let\minitocORIG\minitoc \renewcommand{\minitoc}{\minitocORIG \vspace{1.5em}} \usepackage{multirow} \usepackage{diagbox} \newenvironment{bulletList}% { \begin{list}% {$\bullet$}% {\setlength{\labelwidth}{25pt}% \setlength{\leftmargin}{30pt}% \setlength{\itemsep}{\parsep}}}% { \end{list} } \newtheorem{definition}{Definition} \renewcommand{\epsilon}{\varepsilon} % centered page environment \newenvironment{vcenterpage} {\newpage\vspace*{\fill}\thispagestyle{empty}\renewcommand{\headrulewidth}{0pt}} {\vspace*{\fill}} % Change this to change the informations included in the pdf file \usepackage[palatino]{quotchap} \usepackage{tikz} \usepackage{adjustbox} % scaling pictures \usepackage{afterpage} % allows for the rotation % \usepackage{fontspec} \usepackage{setspace} % %%\usepackage[pdftex]{graphicx} \usepackage{subfig} %for subfloat images \usepackage{stmaryrd} \usepackage{algorithmic} \usepackage{algorithm} \usepackage[titletoc]{appendix} % \usepackage{floatpag} % so that with sideway figures i can remove the page number \newcommand{\acknowledgments}{ \chapter*{Acknowledgments} \noindent \input{frontmatter/thanks} \vspace*{\fill} \newpage \setcounter{page}{1} \pagenumbering{arabic} } \usepackage[mathscr]{eucal} \usepackage{textcomp} \newenvironment{theabstract}[1]% {\markboth{#1}{#1}% {\large\noindent\rule{1ex}{1ex}\hspace{\stretch{1}}% {\textbf{\textit{#1}}}% \hspace{\stretch{1}}\rule{1ex}{1ex}} \addcontentsline{toc}{chapter}{#1} \adjustmtc \vskip 0.2in \begin{large}}% {\end{large}} \newcolumntype{R}[2]{% >{\adjustbox{angle=#1,lap=\width-(#2)}\bgroup}% l% <{\egroup}% } \newcommand*\rot{\multicolumn{1}{R{70}{1em}}}% no optional argument here, please! \definecolor{linkcol}{rgb}{0,0,0.4} \definecolor{citecol}{rgb}{0.5,0,0} \usepackage{hyperref} \hypersetup { pdftitle="PhDNaylorPeter", pdfauthor="Peter NAYLOR", %auteur du document pdfsubject="Bioinformatic", %sujet du document %pdftoolbar=false, %barre d'outils non visible pdfmenubar=true, %barre de menu visible pdfhighlight=/O, %effect of clicking on a link colorlinks=true, %couleurs sur les liens hypertextes pdfpagemode=UseNone, %aucun mode de page pdfpagelayout=SinglePage, %ouverture en simple page pdffitwindow=true, %pages ouvertes entierement dans toute la fenetre %liens en couleur pour internet!!! %linkcolor=linkcol, %couleur des liens hypertextes internes %citecolor=citecol, %couleur des liens pour les citations %urlcolor=linkcol %couleur des liens pour les url %liens en noir pour impression!!! linkcolor=linkcol, %couleur des liens hypertextes internes citecolor=citecol, %couleur des liens pour les citations urlcolor=linkcol %couleur des liens pour les url }
\documentclass[british,titlepage]{ntnuthesis} \title{Title of Your Report} \shorttitle{Title of Your Report} \author{Student Name} \shortauthor{Student Name} \date{2021-12-15} \addbibresource{thesis.bib} \input{glossary.tex} % add glossary and acronym lists before document \begin{document} \input{chapters/0a-abstract.tex} \tableofcontents \listoffigures \listoftables \lstlistoflistings \printglossary[type=\acronymtype] % Print acronyms \printglossary % Print glossary \input{chapters/1-introduction} \input{chapters/2-usage.tex} \input{chapters/3-structure.tex} \input{chapters/4-conclusion.tex} \chapter*{\bibname} \printbibliography[heading=none] \input{chapters/papers.tex} \appendix \input{appendices/a-appendix.tex} \end{document}
\chapter{NEAT Tunnel Components} \label{sec:NEATcomponents} \section{Components} A subsection of the drawings developed for the production of the thermal wall plate are shown below. The drawings detail the size and shape of the wall plate components, yet other features such as hole patterns or underneath cut-outs can be obtained by request. Following each set of drawings included figures show the final fabricated components. The convective plates are not pictured as they are simple rectangular plates which can be fully realized from the plate drawings. \clearpage \subsection{Full Wallplate} %full plate \begin{figure}[h!] \centering \includegraphics[scale=.3]{facility/drawings/Plate_size.PDF} \caption{\footnotesize {\bf XX} } \end{figure} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \clearpage \subsection{Components A} %components A \begin{figure}[h!] \centering \includegraphics[scale=.2]{facility/drawings/AssembledFirstSection.PDF} \caption{\footnotesize {\bf XX} } \end{figure} \begin{figure}[h!] \centering \includegraphics[scale=.2]{facility/drawings/AssembledFirstSection2.PDF} \caption{\footnotesize {\bf XX} } \end{figure} \begin{figure}[h!] \centering \includegraphics[scale=.2]{facility/drawings/Insulation_A.PDF} \caption{\footnotesize {\bf XX} } \end{figure} \begin{figure}[h!] \centering \includegraphics[scale=.2]{facility/drawings/Plate_A.PDF} \caption{\footnotesize {\bf XX} } \end{figure} \begin{figure}[h!] \begin{center} {\subfigcapskip = 5pt \subfigcapmargin = -12pt \subfigure[]{\label{fig:edge-a}\includegraphics[scale=0.2]{facility/MachinedParts/A_meas_v2.JPG}}} {\subfigcapskip = 5pt \subfigcapmargin = -12pt \subfigure[]{\label{fig:edge-b}\includegraphics[scale=0.2]{facility/MachinedParts/A_insul_v2.JPG}}} \end{center} \caption{(a) insulation (b) frame. } \end{figure} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \clearpage \subsection{Components B} Input text description?\\ %components B \begin{figure}[h!] \centering \includegraphics[scale=.2]{facility/drawings/B_size.PDF} \caption{\footnotesize {\bf XX} } \end{figure} \begin{figure}[h!] \centering \includegraphics[scale=.2]{facility/drawings/Insulation_B.PDF} \caption{\footnotesize {\bf XX} } \end{figure} \begin{figure}[h!] \centering \includegraphics[scale=.2]{facility/drawings/Plate_B.PDF} \caption{\footnotesize {\bf XX} } \end{figure} \begin{figure}[h!] \begin{center} {\subfigcapskip = 5pt \subfigcapmargin = -12pt \subfigure[]{\label{fig:edge-a}\includegraphics[scale=0.2]{facility/MachinedParts/B_meas_v2.JPG}}} {\subfigcapskip = 5pt \subfigcapmargin = -12pt \subfigure[]{\label{fig:edge-b}\includegraphics[scale=0.2]{facility/MachinedParts/B_insul_v2.JPG}}} \end{center} \caption{(a) insulation (b) frame. } \end{figure} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \clearpage \subsection{Components C} Input text description?\\ %components C \begin{figure}[h!] \centering \includegraphics[scale=.2]{facility/drawings/C_size.PDF} \caption{\footnotesize {\bf XX} } \end{figure} \begin{figure}[h!] \centering \includegraphics[scale=.2]{facility/drawings/Insulation_C.PDF} \caption{\footnotesize {\bf XX} } \end{figure} \begin{figure}[h!] \centering \includegraphics[scale=.2]{facility/drawings/Plate_C.PDF} \caption{\footnotesize {\bf XX} } \end{figure} \begin{figure}[h!] 
\begin{center} {\subfigcapskip = 5pt \subfigcapmargin = -12pt \subfigure[]{\label{fig:edge-a}\includegraphics[scale=0.2]{facility/MachinedParts/C_meas_v2.JPG}}} {\subfigcapskip = 5pt \subfigcapmargin = -12pt \subfigure[]{\label{fig:edge-b}\includegraphics[scale=0.2]{facility/MachinedParts/C_insul_v1.JPG}}} \end{center} \caption{(a) insulation (b) frame. } \end{figure} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \clearpage \subsection{Components D} Input text description?\\ %components D \begin{figure}[h!] \centering \includegraphics[scale=.2]{facility/drawings/D_size.PDF} \caption{\footnotesize {\bf XX} } \end{figure} \begin{figure}[h!] \centering \includegraphics[scale=.2]{facility/drawings/Insulation_D.PDF} \caption{\footnotesize {\bf XX} } \end{figure} \begin{figure}[h!] \centering \includegraphics[scale=.2]{facility/drawings/Plate_D.PDF} \caption{\footnotesize {\bf XX} } \end{figure} \begin{figure}[h!] \begin{center} {\subfigcapskip = 5pt \subfigcapmargin = -12pt \subfigure[]{\label{fig:edge-a}\includegraphics[scale=0.2]{facility/MachinedParts/D_meas_v2.JPG}}} {\subfigcapskip = 5pt \subfigcapmargin = -12pt \subfigure[]{\label{fig:edge-b}\includegraphics[scale=0.2]{facility/MachinedParts/D_insul_v2.JPG}}} \end{center} \caption{(a) insulation (b) frame. } \end{figure} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \clearpage \subsection{Components E} Input text description?\\ %components E \begin{figure}[h!] \centering \includegraphics[scale=.2]{facility/drawings/E_size.PDF} \caption{\footnotesize {\bf XX} } \end{figure} \begin{figure}[h!] \centering \includegraphics[scale=.2]{facility/drawings/Insulation_E.PDF} \caption{\footnotesize {\bf XX} } \end{figure} \begin{figure}[h!] \centering \includegraphics[scale=.2]{facility/drawings/Plate_E.PDF} \caption{\footnotesize {\bf XX} } \end{figure} \begin{figure}[h!] \begin{center} {\subfigcapskip = 5pt \subfigcapmargin = -12pt \subfigure[]{\label{fig:edge-a}\includegraphics[scale=0.2]{facility/MachinedParts/E_meas_v2.JPG}}} {\subfigcapskip = 5pt \subfigcapmargin = -12pt \subfigure[]{\label{fig:edge-b}\includegraphics[scale=0.2]{facility/MachinedParts/E_insul_v2.JPG}}} \end{center} \caption{(a) insulation (b) frame. } \end{figure} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \clearpage \subsection{Components F} Input text description?\\ %components F \begin{figure}[h!] \centering \includegraphics[scale=.2]{facility/drawings/F_size.PDF} \caption{\footnotesize {\bf XX} } \end{figure} \begin{figure}[h!] \centering \includegraphics[scale=.2]{facility/drawings/Insulation_F.PDF} \caption{\footnotesize {\bf XX} } \end{figure} \begin{figure}[h!] \centering \includegraphics[scale=.2]{facility/drawings/Plate_F.PDF} \caption{\footnotesize {\bf XX} } \end{figure} \begin{figure}[h!] \begin{center} {\subfigcapskip = 5pt \subfigcapmargin = -12pt \subfigure[]{\label{fig:edge-a}\includegraphics[scale=0.2]{facility/MachinedParts/F_meas_v1.JPG}}} {\subfigcapskip = 5pt \subfigcapmargin = -12pt \subfigure[]{\label{fig:edge-b}\includegraphics[scale=0.2]{facility/MachinedParts/F_insul_v2.JPG}}} \end{center} \caption{(a) insulation (b) frame. } \end{figure} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \clearpage \subsection{Components G} Input text description?\\ \begin{figure}[h!] \centering \includegraphics[scale=.18]{facility/drawings/G_size.PDF} \caption{\footnotesize {\bf XX} } \end{figure} \begin{figure}[h!] 
\centering \includegraphics[scale=0.18]{facility/MachinedParts/G_meas_v2.JPG} \caption{ \footnotesize part} \end{figure} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \clearpage \section{Assembly Procedure} \begin{figure}[h!] \centering \includegraphics[scale=0.1]{facility/MachinedParts/G_meas_v2.JPG} \caption{ \footnotesize part} \label{fig:partsG} \end{figure}
\documentclass{article} \usepackage[utf8]{inputenc} \usepackage[margin=1in]{geometry} \usepackage{enumitem} \usepackage{amsmath} \usepackage{listings} \usepackage{color} \usepackage{booktabs} \usepackage[T1]{fontenc} % macro to select a scaled-down version of Bera Mono (for instance) \makeatletter \newcommand\BeraMonottfamily{% \def\fvm@Scale{0.85}% scales the font down \fontfamily{fvm}\selectfont% selects the Bera Mono font } \makeatother \definecolor{codegreen}{rgb}{0,0.6,0} \definecolor{codegray}{rgb}{0.5,0.5,0.5} \definecolor{codepurple}{rgb}{0.58,0,0.82} \definecolor{backcolour}{rgb}{0.95,0.95,0.92} \lstdefinestyle{mystyle}{ backgroundcolor=\color{backcolour}, commentstyle=\color{codegreen}, keywordstyle=\color{magenta}, numberstyle=\tiny\color{codegray}, stringstyle=\color{codepurple}, basicstyle=\BeraMonottfamily\footnotesize, breakatwhitespace=false, breaklines=true, captionpos=b, keepspaces=true, numbers=left, numbersep=5pt, showspaces=false, showstringspaces=false, showtabs=false, tabsize=2 } \lstset{style=mystyle} \title{CS249 Fall 2020\\ Problem Set 1: Statistical Inference} \author{Christopher Munoz Cortes} \date{October 26, 2020} \usepackage{natbib} \usepackage{graphicx} \begin{document} \maketitle \section{Maximum Likelihood Estimation} \begin{enumerate}[label={(\alph*)}] \item Assume we have $n$ positive data points $Y_1,...,Y_n \sim$ Exponential$(\theta)$. Compute the maximum likelihood estimator for $\theta$. We know that the probability density function of an exponential distribution is given by \begin{align*} f(y;\theta) = \begin{cases} \theta e^{-\theta y} & y \geq 0 \\ 0 & \text{o.w.} \end{cases} \end{align*} The likelihood function of this distribution is \[ \mathcal{L}(\theta) = \prod_{i=1}^{n} \theta e^{-\theta y_i} = \left(\theta e^{-\theta y_1}\right) \left(\theta e^{-\theta y_2}\right) ...\left(\theta e^{-\theta y_n}\right) = \theta^n e^{-\theta\sum_{i=1}^{n}y_i} \] The MLE of this exponential distribution can be calculated taking the derivative of the log-likelihood function (which is easier to compute than the likelihood function) and setting it equal to zero. \begin{align*} \dfrac{d}{d\theta}\left[ln{\mathcal{L}(\theta)}\right] &= 0 \\ \dfrac{d}{d\theta} \left[\ln{\left(\theta^n e^{-\theta \sum_{i=1}^{n} y_i}\right)}\right] &= 0 \\ \dfrac{d}{d\theta}\left[n\ln{\theta} - \theta \sum_{i=1}^{n} y_i\right] &= 0 \\ \dfrac{n}{\theta} - \sum_{i=1}^{n}y_i &= 0 \end{align*} Solving for the $\theta$: \begin{equation*} \boxed{\theta = \dfrac{n}{\sum_{i=1}^{n} y_i}} \end{equation*} \item Assume we have $n$ positive data points $Y_1,...,Y_n \sim$ Uniform$(0, \theta)$, meaning the data is coming from a uniform distribution in the interval $[0,\theta]$. Compute the maximum likelihood estimator for $\theta$. The pdf of $Y \sim$ Uniform$(0,\theta)$ is \begin{equation} f(y;\theta) = \begin{cases} \frac{1}{\theta} & 0 \leq y \leq \theta \\ 0 & \text{o.w.} \end{cases} \end{equation} Now consider a fixed value of $\theta$. If $Y_i > \theta$ for some $i$, then $f(Y_i;\theta)$ = 0 and therefore $\mathcal{L}(\theta)$ = 0. This means that if any $Y_i > 0$, $\mathcal{L}(\theta) = 0$. This can be written as $\mathcal{L}(\theta) = 0$, if $\theta < Y_n$, where $Y_{(n)} = \max\{Y_1,\ldots,Y_n\}$. 
On the other hand, if $Y_{(n)} \leq \theta$, $f(y;\theta) = 1/\theta$ and consequently $\mathcal{L}(\theta) = 1/\theta^n$ Hence, \begin{equation} \mathcal{L}(\theta) = \begin{cases} \frac{1}{\theta^n} & \theta \geq Y_{(n)} \\ 0 & \theta < Y_{(n)} \end{cases} \end{equation} Finally, since $\mathcal{L}(\theta)$ is strictly decreasing in $[Y_{(n)}, \infty)$, the maximum value occurs at $Y_{(n)}$ and $\hat{\theta} = Y_{(n)}$. \item Estimate bias, variance, and RMSE for the estimator in (b) when $\theta = 10$ and $n=100$ by doing simulations in Python. \begin{lstlisting}[language=Python, caption=Code for part (c)] import numpy as np np.random.seed(0) # Estimate bias, variance, and RMSE for a estimator from part (b) # for theta=10, n=100 low = 0 theta = 10 n=100 theta_hats = [] def question_one(low, theta, n): theta_hats = [] for i in range(1000): y = np.random.uniform(low=0, high=theta, size=n) theta_hat = np.max(y) theta_hats.append(theta_hat) theta_hats = np.array(theta_hats) # Calculate bias, variance, and RMSE of the estimator theta_hat # To calculate the bias, take the mean of the theta_hats as the expectation bias = np.mean(theta_hats) - theta variance = np.var(theta_hats) mse = bias**2 + variance rmse = np.sqrt(mse) return bias, variance, rmse bias, variance, rmse = question_one(low, theta, n) \end{lstlisting} Output: \begin{itemize} \item bias$(\hat{\theta})$: -0.100 \item Var$(\hat{\theta})$: 0.00829 \item RMSE$(\hat{\theta})$: 0.135 \end{itemize} \item What are the estimated values in (c) if $n$ increases to 500? Describe your observations. If $n$ increases from 100 to 500, $\hat{\theta}$ starts to converge towards $\theta$ and the ``spread'' of the distribution begins to shrink, as evidenced by the decreasing variance and RSME. In other words, $\hat{\theta}$ becomes a better estimator of $\theta$. \begin{itemize} \item bias$(\hat{\theta})$: -0.0193 \item Var$(\hat{\theta})$: 0.000360 \item RMSE$(\hat{\theta})$: 0.0271 \end{itemize} \end{enumerate} \pagebreak \section{Bootstrap} \begin{enumerate}[label={(\alph*)}] \item Assume we have a sample of 20 IDD data points with the following values: 3.0 1.9 6.4 5.9 4.2 6.2 1.4 2.9 2.3 4.8 7.8 4.5 0.7 4.4 4.4 6.5 7.6 6.1 2.7 1.6 Assume we define $T$ as the median among 20 data points. Use bootstrap to estimate the standard error and the confidence interval for $T$. \begin{itemize} \item Bootstrap standard error: 0.733 \item Bootstrap 95\% confidence interval: (2.964, 5.836) \end{itemize} \begin{lstlisting}[language=Python, caption=Standard Error and Confidence Interval for $T$] # Create array with data data = np.array([3.0, 1.9, 6.4, 5.9, 4.2, 6.2, 1.4, 2.9, 2.3, 4.8, 7.8, 4.5, 0.7, 4.4, 4.4, 6.5, 7.6, 6.1, 2.7, 1.6]) # Create the bootstrap datasets and calculate the median for each t_boot_list = [np.median(np.random.choice(data, len(data), replace=True)) for _ in range(1000)] # Calculate the standard error and the confidence interval t_boot_se = np.std(t_boot_list) ci_lower = np.median(data) - 1.96*t_boot_se ci_upper = np.median(data) + 1.96*t_boot_se \end{lstlisting} \item Use \lstinline{y = np.random.normal(0, 5, 100)} to generate 100 data points from the normal distribution $N(0,5)$. Consider the generated data points as your observed data. \begin{itemize} \item Apply the bootstrap method to estimate the standard error for $T_1$ and $T_2$, where $T_1$ is the sample median and $T_2$ is the maximum value in the sample. 
\begin{itemize} \item $T_1$ Bootstrap standard error: 0.504 \item $T_2$ Bootstrap standard error: 1.680 \end{itemize} \item Next, compute the actual standard error for $T_1$ and $T_2$ by simulating many times from the data source (i.e., $N(0, 5)$). \begin{itemize} \item Actual standard error by sim from source for $T_1$: 0.623 \item Actual standard error by sim from source for $T_2$: 2.196 \end{itemize} \end{itemize} \begin{lstlisting}[language=Python, caption=Bootstrap and Actual Standard Error] # Generate new data points. This is the observed data. y = np.random.normal(0,5,100) # Apply the bootstrap method to estimate # T1: sample median # T2: max value t1_boot_list = [np.median(np.random.choice(y, len(y), replace=True)) for _ in range(1000)] t1_boot_se = np.std(t1_boot_list) t2_boot_list = [np.max(np.random.choice(y, len(y), replace=True)) for _ in range(1000)] t2_boot_se = np.std(t2_boot_list) # Compute the actual standard error for T1 and T2 by simulating many times # from the data source sim_t1_list = [np.median(np.random.normal(0,5,100)) for _ in range(1000)] sim_t1_se = np.std(sim_t1_list) sim_t2_list = [np.max(np.random.normal(0,5,100)) for _ in range(1000)] sim_t2_se = np.std(sim_t2_list) \end{lstlisting} \end{enumerate} \pagebreak \section{Parametric Bootstrap} \begin{enumerate}[label={(\alph*)}] \item First, we need a parametric distribution model for the data. Let’s assume the data points in part (a) are generated from a normal distribution with the mean of $\theta$ and the standard deviation of 2. Using the observed data, compute $\hat{\theta}$, the estimated value of the $\theta$ in this distribution model. We can compute $\hat{\theta}$ using MLE: \begin{align*} \mathcal{L}(\theta,\sigma) &= \prod_i \dfrac{1}{\sqrt{2\pi\sigma^2}}\exp{\left\{-\dfrac{1}{2\sigma^2} \left(X_i - \theta \right)^2\right\}} \\ \mathcal{L}(\theta,\sigma) &= \dfrac{1}{\sqrt{(2\pi\sigma^2)^n}}\exp{\left\{-\dfrac{1}{2\sigma^2} \sum_i{\left(X_i - \theta \right)^2} \right\}} \end{align*} Taking the log on both sides, \begin{align*} \ln\mathcal{L}(\theta,\sigma) &= -\dfrac{n}{2}\ln 2\pi\sigma^2 - \dfrac{1}{2\sigma^2} \sum_i{(X_i - \theta)^2} \end{align*} Taking the partial derivative with respect to $\theta$, \begin{equation*} \dfrac{\partial}{\partial \theta} \ln \mathcal{L} (\theta, \sigma) = \dfrac{1}{\sigma^2} \sum_i (X_i - \theta) = \dfrac{1}{\sigma^2} n(\bar{x} - \theta) \\ \end{equation*} Setting equal to 0 and solving for $\theta$, \begin{equation*} \boxed{\hat{\theta} = \bar{x}} \end{equation*} Consequently, $\boxed{\hat{\theta} = \bar{x} = 4.265}$. \item Generate bootstrap samples (20 data points each) from the distribution with the estimated parameter (i.e., $N(\hat{\theta},2)$). \texttt{See listing below for code} \item In each bootstrap sample, estimate the value of $\theta$ again based on the simulated data. \texttt{See listing below for code} \item Compute the standard deviation for the estimated parameter among the bootstrap samples. This standard deviation is the estimated standard error for $\theta$. The estimated standard error for $\theta$ was 0.453. \begin{lstlisting}[language=Python, caption=Parametric Bootstrap code] # Compute theta_hat for this distribution model (using MLE) theta_hat = np.mean(data) print(f"Estimator for the mean of theta (theta_hat): {theta_hat}") # Generate bootstrap samples, i.e. 
# simulate the data from source (20 data points each, as in the observed sample)
sim_data = [np.random.normal(theta_hat, 2, 20) for _ in range(1000)]

# Estimate the value of theta_hat in each bootstrap sample
theta_hats = [np.mean(sim_sample) for sim_sample in sim_data]

# Compute the standard error for theta_hat
theta_hat_se = np.std(theta_hats)
print(f"Standard error for theta_hat: {theta_hat_se}")
\end{lstlisting}
\end{enumerate}
\pagebreak
\section{Bayesian Inference}
\begin{enumerate}[label={(\alph*)}]
\item Assume we have observed IID data points $X_1,\ldots,X_{10}$ from the distribution $N(\theta,1)$, where the sample average, $\bar{X}$, is 1.68. If our prior belief about $\theta$ can be described by $N(0,3)$, compute the posterior distribution of $\theta$ after observing this data.
% \begin{table}[h]
% \centering
% \begin{tabular}{@{}cccc@{}}
% \toprule
% Parameter & \texttt{mean} & \texttt{se\_mean} & \texttt{sd} \\
% \midrule
% $\theta$ & 1.33 & 0.01 & 0.39 \\
% $\sigma$ & 1.24 & 0.01 & 0.35 \\ \bottomrule
% \end{tabular}
% \end{table}
We know that the posterior distribution is given by
\begin{align*}
f(\theta|X_1,\ldots,X_{10}) \propto \mathcal{L}(\theta)f(\theta)
\end{align*}
Dropping factors that do not depend on $\theta$ at each step,
\begin{align*}
\mathcal{L}(\theta)f(\theta) &= \prod_{i=1}^{n} \dfrac{1}{\sqrt{2\pi\sigma^2}} \exp{\left\{-\dfrac{(X_i - \theta)^2}{2\sigma^2}\right\}} \dfrac{1}{\sqrt{2\pi\sigma_0^2}} \exp{\left\{-\dfrac{(\theta - \theta_0)^2}{2\sigma_0^2}\right\}} \\
&\propto \exp{\left\{-\dfrac{(\theta - \theta_0)^2}{2\sigma_0^2} - \sum_{i=1}^{n} \dfrac{(X_i - \theta)^2}{2\sigma^2}\right\}} \\
&\propto \exp{\left\{\dfrac{-\theta^2(\sigma^2 + n\sigma_0^2) + 2\theta\left(\theta_0\sigma^2 + \sigma_0^2\sum_i X_i\right)}{2\sigma_0^2\sigma^2}\right\}} \\
&\propto \exp{\left\{-\dfrac{\left(\theta - \dfrac{\theta_0\sigma^2 + \sigma_0^2\sum_i X_i}{\sigma^2 + n\sigma_0^2}\right)^2}{2\dfrac{\sigma_0^2\sigma^2}{\sigma^2 + n\sigma_0^2}}\right\}}
\end{align*}
where the last line follows by completing the square in $\theta$. Here we can define
\[
\sigma_1^2 = \dfrac{\sigma_0^2 \sigma^2}{\sigma^2 + n\sigma_0^2} = \dfrac{1}{\frac{\sigma^2 + n\sigma_0^2}{\sigma_0^2\sigma^2}} = \dfrac{1}{\frac{1}{\sigma_0^2} + \frac{1}{\frac{\sigma^2}{n}}} = \left( \dfrac{1}{\sigma_0^2} + \dfrac{1}{\frac{\sigma^2}{n}} \right)^{-1}
\]
and
\[
\theta_1 = \dfrac{\theta_0\sigma^2 + \sum_i X_i \sigma_0^2}{\sigma^2 + n\sigma_0^2} = \dfrac{\theta_0\sigma_0^{-2} + \sum_i X_i \sigma^{-2}}{\sigma_0^{-2} + n\sigma^{-2}} = \sigma_1^2 \left(\theta_0 \sigma_0^{-2} + \sum_i X_i \sigma^{-2}\right) = \sigma_1^2 \left(\dfrac{\theta_0}{\sigma_0^2} + \dfrac{\bar{X}}{\frac{\sigma^2}{n}}\right)
\]
which
shows that the resulting expression for the posterior distribution corresponds to a normal distribution with mean $\theta_1$ and standard deviation $\sigma_1$, i.e. $N(\theta_1,\sigma_1)$. Plugging in our values, we have: \[ \sigma_1^2 = \dfrac{\sigma_0^2\sigma^2}{\sigma^2 + n\sigma_0^2} = \dfrac{(3)^2(1)^2}{(1)^2 + 10(3)^2} = 0.0989 \rightarrow \boxed{\sigma_1 = 0.314} \] and \[ \theta_1 = \sigma_1^2 \left(\dfrac{\theta_0}{\sigma_0^2} + \dfrac{\bar{X}}{\frac{\sigma^2}{n}}\right) = 0.0989 \left(\dfrac{0}{3^2} + \dfrac{1.68}{1/10}\right) \rightarrow \boxed{\theta_1 = 1.66} \] \item Assume we have gathered more evidence about $\theta$ in part (a) by experimenting with another signal related to the same source and gathering new data points $Z_1, \ldots, Z_{20} \overset{\text{\emph{iid}}}{\sim} N(\theta,4)$. If $\bar{Z} = 0.8$, compute the new posterior distribution of $\theta$ after seeing both samples. Assume samples in part (a) and (b) are independent from each other. Considering the prior distribution as $N(\theta_1, \sigma_1) = N(1.66,0.314)$, the posterior distribution given the new data $Z_1, \ldots, Z_{20} \sim N(\theta,4)$ is: \[ \sigma_2^2 = \dfrac{\sigma_1^2\sigma^2}{\sigma^2 + n\sigma_1^2} = \dfrac{(0.0989)(4)^2}{(4)^2 + 20(0.0989)} = 0.088 \rightarrow \boxed{\sigma_2 = 0.297} \] and \[ \theta_2 = \sigma_2^2 \left(\dfrac{\theta_1}{\sigma_1^2} + \dfrac{\bar{X}}{\frac{\sigma^2}{n}}\right) = 0.088 \left(\dfrac{1.66}{0.0989} + \dfrac{0.8}{4^2/20}\right) \rightarrow \boxed{\theta_2= 1.565} \] \item How does your inference result change if your sample in part (b) had more data points? As the number of data points $n$ increases, the posterior distribution becomes more influenced by the likelihood function of the observed data. Eventually, for very large values of $n$, the prior distribution vanishes and the Bayesian inference method yield a similar result to a frequentist approach. \item How does your inference result change if you had more certainty about your prior belief (i.e. the prior distribution had lower variance)? The posterior distribution would be closer to the prior distribution, provided that the observed data didn't indicate a significantly different distribution. %\begin{lstlisting}[language=Python, caption=Bayesian Inference Code] %import pystan % %n = 10 %x_bar = 1.68 %y = np.random.normal(x_bar, 1, n) % %model_code = """ %data { % int<lower=0> n; % real y[n]; %} %parameters { % real<lower=0,upper=100> theta; % real<lower=0,upper=10> sigma; %} %model { % y ~ normal(theta,sigma); %} %""" %norm_dat = { % 'n': 10, % 'y': y %} % %# Fit the model with the original data %model = pystan.StanModel(model_code=model_code) %fit = model.sampling(data=norm_dat, iter=1000, chains=4, n_jobs=2) %print(fit) % %# Fit the model with the new data %new_dat = { % 'n': 20, % 'y': np.random.normal(0.8, 4, 20), %} % %fit2 = model.sampling(data=new_dat) %print(fit2) %\end{lstlisting} \end{enumerate} %\bibliographystyle{plain} %\bibliography{references} \end{document}
\section{201509-4} \input{problem/5/201509-4-p.tex}
\chapter{Discussion} In this chapter we are discussing what conclusions we can draw from our results presented in sec. \ref{sec:results} and in which directions future research could go. % ======================================= \section{Conclusion} % ======================================= \subsection{The Quality of Our Topic Models} The results from our measurements regarding the quality of our topic models indicate that the created topic models tend to be stable in their quality when varying the seed and even with slightly changed dictionaries. Furthermore, topic models created with classic LDA tend to have a higher quality than the ones created with neural LDA. Topic models from neural LDA have an interesting inverse peak at 3 topics and tend to produce topic models with nearly the same quality for a topic size of 5 or more when using ancestral sampling. In the original papers of GPT-2 \cite{gpt-2} and Transformer-XL \cite{transformer-xl}, the coherence score $C_v$ for their testset is between $0.35$ and $0.60$. Thus, our topic models have a good correlation to human topic-ranking data, especially those with a higher number of topics which can be seen in table \ref{fig:summary}. Regarding different sampling techniques, we see that on average Top-P and even more so Typical sampling clearly improve the quality of topics created by topic models. \begin{figure}[H] \centering \includegraphics[width=1\textwidth]{figures/Unigrams-cv-var-table-is} \caption{$C_v$ topic coherence score table with min and max mean values computed over all topic models.} \label{fig:summary} \end{figure} % ======================================= \subsection{Comparing the Semantic Spaces} The results from the comparison between topic models with corpora of 100'000 documents clearly show dissimilarities between the sampled corpus and the corpus the GPT-2 model was trained on. With our variance analysis, we can say with a good degree of confidence that these findings are solid for a topic size of 10 or more. In general, we expect the two topic distributions from training dataset and generated dataset to be similar, as the goal of our language model is to learn the language of the corpus it is trained on. This indicates that there is something that skews the model's topic distribution. To find out what is causing this, we have to look at the results from the comparison of topic models with corpora of 10'000 documents. First, through our variance analysis for topic models of corpora with 10'000 documents, we have to take a variation of up to $0.1$ for a topic size of 5 or more into account. This makes the interpretation rather vague and we conclude that topic models from corpora with 10'000 documents are not ideal for interpretations. Nonetheless, in our comparison we see that dissimilar topic models result in a value of $0.5$ or higher. This strongly correlates to our findings with corpora of 100'000 documents. For similar topic models, the values tend to be under $0.2$ for a topic size of 10 or lower. For a topic size higher than 10, the area of similar topic models tends to be under $0.4$. This differs from our first findings where the area for similar topic models remains under $0.2$ across all topic sizes. The comparison of the topic models from the sampled corpus and the corpus the GPT-2 model, respectively the Transformer-XL model, was trained on, lies in the area of similarity. This again differs from our first findings where we see a clear discrepancy. 
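To make this kind of comparison concrete, the sketch below shows one simple way a difference score between two topic models could be computed; it is purely illustrative and not necessarily the exact metric used in this work. It assumes the topic-word distributions of both models are available as NumPy arrays over a shared dictionary, and it matches each topic to its closest counterpart by Jensen-Shannon distance, yielding a score between 0 (identical semantic spaces) and 1 (maximally different).
\begin{verbatim}
import numpy as np
from scipy.spatial.distance import jensenshannon

def topic_model_difference(phi_a, phi_b):
    # phi_a, phi_b: (n_topics, vocab_size) arrays whose rows are
    # topic-word probability distributions over the same dictionary.
    distances = []
    for topic_a in phi_a:
        # distance from this topic to the closest topic of the other model
        closest = min(jensenshannon(topic_a, topic_b, base=2)
                      for topic_b in phi_b)
        distances.append(closest)
    return float(np.mean(distances))

# Example with two randomly initialised topic models (10 topics, 1000 words)
rng = np.random.default_rng(0)
phi_a = rng.dirichlet(np.ones(1000), size=10)
phi_b = rng.dirichlet(np.ones(1000), size=10)
print(topic_model_difference(phi_a, phi_b))
\end{verbatim}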
We can conclude that if there is a strong inductive bias from the model architecture, we can assume we would see some discrepancies even in topic models from corpora with just 10'000 documents. Concerning the differences between the GPT-2 and Transformer-XL results, we expect the topic models created from the original Transformer-XL model to behave similarly to the model we trained ourselves. This is because the original Transformer-XL model was also trained on the Wikitext103 corpus. This assumption is reflected in our results for \texttt{trafo\_xl\_ours} vs. \texttt{trafo\_xl}. It also makes sense that the results from GPT-2 and Transformer-XL did not differ much. Both underlying architectures are based on the decoder model from the Transformer architecture. It backs our proposed approach because we can see similar results with a similar language model architecture. As for the differences between the classic LDA and the neural LDA, we come to the same conclusions as with classic LDA. The difference of the learned semantic space and the original one tend to appear similar. There is no clear indication that something skews with the topic distributions. What can clearly be seen in the graphs is that both topic model techniques have the same area of similarity and area of dissimilarity for a corpus size of 10'000 documents. This also backs our proposed approach as the produced results are very similar across different topic modeling techniques. As for corpora created with different sampling techniques (ancestral, Top-P and Typical sampling), we see that different sampling methods always lead to a skewed topic distribution. By looking at the red lines in all the graphs with all three sampling methods (sec. \ref{sec:comp}, the graphs on the right), we can see that Top-P sampling and even more Typical sampling in general influence the topic distributions. We therefore conclude that the sampling methods Top-P and even more so Typical sampling introduce a generalization to the predicted output which overrules some of the meanings in the semantic spaces. Such an impact is rather unlikely if there is a strong inductive bias in the language model architecture. % ======================================= \subsection{Summary} With our new approach to probing language models, we show that by analyzing the differences from various topic distributions, we gain a better understanding of the characteristics of the language learned by today's language models. Furthermore, we are closer than before to being able to identify the isolated impact the inductive bias has on our metric. With this, we show a way to finding out what components of the learning algorithm lead to certain inductive biases. We find discrepancies in the semantic space that are linked to the different methods and parameters we applied but also to the language model algorithm itself. We give a baseline for how to use our method, and build a foundation on which further research can be built. % ======================================= \section{Further Work} After providing the groundwork, our method can be further enhanced and explored in various ways. By focusing on the topic model part, the method itself can be extended to work with non-LDA-based topic models \cite{karami2018fuzzy, zhang2022pre}. Additionally, a different approach to tokenization and dictionary creation can lead to more stable and more meaningful topic models which is reflected in the metric. 
This can help to better identify the differences in the semantic space over corpora of different sizes. We tried incorporating bigrams and trigrams into our topic models but could not see any notable difference between them. This does not mean they have no influence, just that our implementation did not show any.
By putting the focus on the language model, the method itself can be extended to work with other language model architectures, e.g. encoder-based architectures. Additionally, different approaches to training the language model, like changing the loss function and the training parameters, can provide an insight into the influence of architectural properties on the semantic space. All in all, comparing the behaviour of our metric across different language models might tell us what components of the learning algorithm lead to certain inductive biases, which in turn can change the choice of language model for specific downstream tasks.
Furthermore, changing the training data for the language models, i.e. using a completely different dataset or an altered version, can give us an insight into how the inductive bias of one language model architecture affects the learning behaviour with different training data. This might lead to conclusions on how today's language models are trained and how we utilize training data. We also suggest testing the stability of our method for a corpus size higher than 10'000 documents, as this will provide answers to how reproducible the results of our metric are and how the language model's inductive bias is reflected at different corpus sizes.
If we consider a totally different direction instead of focusing on the inductive bias, we can shift our view to how we can utilize the topic distributions learned from a language model. We could try to influence the probability of a string produced by a language model by combining the learned topic distribution with the model's intermediate conditional probability over words. This could help us increase the probability of a string with a certain topic, which is especially interesting when the topic is not very common in the learned language. By analyzing the topics themselves, we could see where the semantic space differs on a word level. By comparing the topic-word distributions with the results or findings from downstream tasks, we could draw conclusions about what specific bias those models exert in those downstream tasks.
%\nomenclature[]{UE4}{Unreal Engine 4}
%\nomenclature[]{RAV}{Robotic Aerial Vehicle}
\subsection{Evaluation Criteria}\label{subsec:EvalCriteria}
In order to design the virtual simulation environment, it was important to first identify the criteria that would determine its usefulness. We began by identifying key physical phenomena that would need to be present, or integrated in future iterations. This was based on an operational scenario that was outlined in the ROCSAFE \href{https://www.nuigalway.ie/rocsafe/research/}{Deliverable 2.4: Detailed Use Cases}\footnote{\href {https://www.nuigalway.ie/rocsafe/research/}{https://www.nuigalway.ie/rocsafe/research/}}. We modelled Use Case 1, with a brief description as follows:\par
"\textit{The scene is set in a rural location, with some forest, a twin track rail line, a small town 10Km away with a small road running near to the rail tracks with access to the rail tracks. The conditions are cool and dry, with a light breeze. A train has been derailed and heavy machinery has been used to damage the tracks. Radioactive material in containers has been exposed. It is intended to use autonomous aerial and ground vehicles to remotely survey and document the scene. They will then be used to localize forensic evidence, subsequently leading to remote evidence collection.}".\par
The result of modelling this scenario using UE4 is shown in Figures \ref{fig:finalVirtualEnv1} and \ref{fig:finalVirtualEnv2}.
%Should talk about the rad. environment here but not sure of dissemination status.
\par
In relation to the research question stated above, we identified the most salient data used in hazardous scene management and how it can be realistically generated:
\begin{itemize}
    \item ROCSAFE and other CBRN scene management reference manuals frequently state the value of a visual overview of the scene \cite{StandardizationOffice2012AJP-3.8ADefence}. In order to generate realistic simulated images, the simulation will need to have high-resolution rendering capabilities.
    \item Further to the above point, the images will need to be generated from the perspective of a RAV in order to be of use when prototyping object detection modules and other AI systems related to ROCSAFE.% This is a key aspect of the ROCSAFE project.
    \item The scene will need to have a configurable physical layout so that occlusion, shadows and other real-world aspects will be present in images. A non-uniform topography is also important for planning potential paths to sources of evidence for sampling.
    \item For the scene described above, a simulated radiation sensor is necessary, which should record noisy readings that are affected by real-world phenomena such as occlusion.
    \item All data must have timestamps and spatial information associated with them. The spatial information should use a real-world coordinate reference system, such as GPS.
\end{itemize}
%The core components that the simulation of this scenario required were identified and we describe them in terms of evaluation criteria as follows:
We used the above points to discern the technical requirements that the simulation environment would need to meet, which form our evaluation criteria:
%\note{This is probably the most important part of this chapter}
\begin{itemize}
    \item The ability to place arbitrary realistic virtual representations of physical objects in the scenario in various configurations.
    \item The ability to render high-quality images from the scenario from arbitrary locations at arbitrary resolutions, with common visual phenomena present, such as shadows and lens flare.
    \item There must be sufficient detail in the scene to introduce some noise to image processing problems. This means avoiding a highly uniform configuration.
    \item Support for multiple heterogeneous simulated aerial vehicles, with a high-level API for sensing and navigation for each vehicle. The API should not be platform-specific, so that different types of vehicle may be considered.
    \item Simulated sensor readings from the aerial vehicles, including position, velocity and altitude, that depend on the state of the aerial vehicle in the environment. For example, if the aerial vehicle is close to a source of radiation, the simulated sensor reading should be high.
    \item Simulation software should have a permissive licence and should permit publications that include details of the software.
    \item The ability to run the simulations at an increased speed without corrupting the fidelity of the sensor/actuator functionality.
    \item The ability to quickly change the configuration of the simulation.
\end{itemize}
\note{More can be added to this list.}

\subsection{Existing Tools and Software}
In order to provide this functionality, it is clear that using existing tools would be desirable, as writing low-level code for tasks such as rendering would take a prohibitive amount of time. Game engines have been increasingly used for simulations of physical phenomena, with a growing interest in niche areas. Examples include generating high-fidelity training data for computer vision algorithms \cite{Qiu2016UnrealCV:Engine}, deep learning algorithms \cite{Gaidon2016VirtualAnalysis}, automated crowd size estimation algorithms \cite{Lee2018DigitalCrowds} and target tracking algorithms \cite{Mueller2016ATracking}. An overview of literature related to the use of game engines in creating simulation software is outlined in Section \ref{subsec:GameEngineReview}. The overview describes how most modern simulation software packages that use game engine components are mature in their capabilities to model and render physical scenarios, but not all provide good support for the use of robotic vehicles.
%\note{want to get across that basic rendering etc. is offered by many platforms, real issue is to find something to work with that can provide support for Remotely Operated Vehicles / Autonomous vehicles}
This meant that an emphasis was placed on choosing software with some capability to implement high-fidelity simulated aerial vehicles as well as physics and graphics rendering.
%Specific functionality can commonly be added to games engines using plugins, which are usually specific to an individual games engine.
Standalone simulation tools were considered alongside tools designed to be run using a game engine. The simulation software packages that were investigated for their potential use in designing a custom simulation environment for disaster scene management are shown in Table \ref{table:SimulatorComparison}, with more detailed overviews provided in \cite{Ebeid2018ASimulators}. UE4, combined with the AirSim plugin, was chosen to develop the simulation, since the documentation for both suggested that this combination could meet all of the evaluation criteria outlined above. We give more details of the integration of AirSim in Section \ref{sec:AirSimIntegration}.\par
%\note{Might be better off presenting this as a table.
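To illustrate the kind of high-level, platform-agnostic vehicle API called for by the evaluation criteria, the short Python sketch below uses the AirSim client to command a simulated multirotor, capture an image and read the vehicle state. The radiation reading at the end is purely hypothetical: the source location and the inverse-square model only stand in for whatever sensor model is integrated into the environment, and a realistic model would also include noise and occlusion.

\begin{verbatim}
import math
import airsim  # AirSim Python client (provided with the AirSim project)

client = airsim.MultirotorClient()
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True)

# High-level navigation: take off and fly to a survey waypoint
# (NED frame, metres; negative z is above the ground).
client.takeoffAsync().join()
client.moveToPositionAsync(40, -20, -15, 5).join()

# Capture a scene image from the default front camera for later processing.
image = client.simGetImages([airsim.ImageRequest("0", airsim.ImageType.Scene)])[0]
print("Captured image of", image.width, "x", image.height, "pixels")

# Read the vehicle state and derive an illustrative radiation reading that
# falls off with distance to a hypothetical source location.
pos = client.getMultirotorState().kinematics_estimated.position
source = (55.0, -25.0, 0.0)  # hypothetical source position (NED frame, metres)
dist = math.sqrt((pos.x_val - source[0]) ** 2 +
                 (pos.y_val - source[1]) ** 2 +
                 (pos.z_val - source[2]) ** 2)
reading = 1000.0 / max(dist ** 2, 1.0)  # inverse-square falloff, arbitrary units
print("Simulated radiation reading:", reading)
\end{verbatim}

Because the client communicates with the vehicle over RPC rather than through engine-specific calls, the same script can in principle be pointed at a different vehicle implementation, which is what the platform-independence criterion above is intended to capture.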
%Format could be Simulator | Licence | Implementation Language | Supported OS |
%Developer | Additional Notes}
%\begin{itemize}
%Provide a brief description of each.
% \item Gazebo: A free, open-source standalone simulator written in C++. Development began at the University of Southern California, now maintained by the Open Source Robotics Foundation. \cite{Koenig2005DesignSimulator}
% \item Airsim: A free, open-source C++ plugin for UE4 developed by Microsoft AI \& Research. MIT Licence. \cite{Shah2017AirSim:Vehicles}
% \item jMAVSim: A free, open-
% \item HackFlightSim
% \item RotorS
% \item Morse
% \item New Paparazzi Simulator
%\end{itemize}

\subsection{Virtual Simulation Environment Design Using UE4}
%\note{might need to re-word this. This section should be about the design process that was used once Unreal Engine had be determined the platform of choice to develop with}
%The design process of any simulation or game using UE4 should follow certain good practices in order to avoid some common pitfalls that can cause serious delays in development.
Once an environment has been built using UE4, it can be labour-intensive to radically alter it \cite[p.~454]{Rouse2004GamePractice}. This suggests that care should be taken to plan the design process so that consistent and efficient results can be achieved.
%Most material related to design of simulations and games using UE4 are highly qualitative, addressing questions like "\textit{what does the player actually want and how can that be delivered?}" rather than describing how the process of designing the simulation should be carried out.
Some good level design practices are noted in \cite{Rouse2004GamePractice}, although many are not applicable to the design of a serious simulation. Notable points from \cite{Rouse2004GamePractice} are summarised below:
\begin{itemize}
    \item Pencil and paper sketches of the level's general layout can be a very good idea in order to avoid the perils of "\textit{designing yourself into a corner}". As an example, this could manifest itself as underestimating the proximity between two high-level objects (e.g. a hall and a room), which may lead to a large-scale redesign further down the design process.
    \item During the first pass, do not worry overly about textures and geometry; focus on ensuring that the layout is realistic.
    \item Refine the architecture once a realistic layout has been identified.
    \item Add basic gameplay once the physical layout has taken shape. This avoids a tight coupling between the two.
    \item The final step is to refine gameplay and aesthetics.
\end{itemize}

Using these concepts as a basis, the design process for the simulation environment proceeded roughly as follows, bearing in mind the evaluation criteria identified at the start of the chapter:
\begin{enumerate}
    \item A sketch of the layout was drawn up based on an operational scenario outlined in Deliverable 2.4 of the ROCSAFE project.
    \item The landscape was sculpted and painted using UE4's in-built landscaping tools.
    \item We identified the assets that would be necessary to develop the environment to our specification. We acquired these assets from websites including \href{http://www.cgtrader.com}{CGTrader}\footnote{\href {http://www.cgtrader.com}{https://www.cgtrader.com}} and \href{https://3dwarehouse.sketchup.com/?hl=en}{Google 3D warehouse}\footnote{\href {https://3dwarehouse.sketchup.com/?hl=en}{https://www.3dwarehouse.sketchup.com}}.
    \item We imported the assets into \emph{Autodesk 3DS} in order to ensure that textures were of sufficient quality to facilitate the planned image processing on collected images.
    \item We then imported the assets into UE4 as static meshes and placed them into the scene according to the sketch. In order to ensure that this process can scale, we ensured that a static mesh could be replaced by simply importing a new mesh which retains the position of the original in the environment.
    \item We qualitatively evaluated the rendered images to determine their suitability.
    \item We archived the environment before integrating the AirSim aerial vehicle simulator plugin.
    \item We then integrated AirSim \cite{Shah2017AirSim:Vehicles} with some modifications, which provided aerial vehicle simulation in UE4. This step is non-trivial and details of how it was done are outlined in Section \ref{sec:AirSimIntegration}.
\end{enumerate}
%\note{Would be good to discuss some of the challenges met while developing the simulation and how they were overcome}

\subsection{Design Process without Dynamic Elements}
This process was carried out iteratively and the results of the developed world excluding the dynamic elements are discussed here. Most changes took place directly within the UE4 Editor. The chronological development of the virtual world is shown in Figures \ref{fig:virutalEnvDevelopment1} and \ref{fig:virutalEnvDevelopment2}. The figures on the $i$-th row correspond to the $i$-th distinct version of the physical layout of the virtual world. The first iteration had flaws that were improved throughout the development process. The major problems that we addressed were:
\begin{itemize}
    \item Rendered images were of poor quality due to incorrect UV texture mappings
    %UV mapping is a technique used to "wrap" a 2D image texture onto a 3D mesh.
    on objects.
    \item Textures were of poor quality and highly uniform.
    \item The layout of the environment was highly uniform. Note that the rail tracks are perfectly straight.
    \item The scene was minimalist and lacked any convincing detail which would introduce noise to the AI algorithms being developed as part of ROCSAFE.
\end{itemize}
Although a human may recognise the semantics of the scene, this first iteration does not capture some key aspects identified in the evaluation criteria. In order to be of real value to the ROCSAFE project for tasks such as training the automation of localising an object using aerial vehicles, it was necessary to address these major problems. Improvements are shown in Figures \ref{fig:virutalEnvDevelopment1} and \ref{fig:virutalEnvDevelopment2} and the techniques used to make these improvements are discussed in Section \ref{subsec:TechniquesImproveRealism}.
%\note{Might be worth making the margins wider for table of images. Also might be worth recording images again with fixed exposure and orientations for consistency. Come back to this once talked about it with Michael and Frank. Different versions of environment listed at bottom of this file.}
%\note{Will ideally have all of these figures on a single page}
%\note{Current format is top view, isometric view, side view.
%This can be changed but will take quite a lot of effort.}
%\note{Might be better to have this landscape instead}
\begin{landscape}
\captionsetup[subfigure]{labelformat=empty}
\begin{figure}
\centering
\begin{tabular}{ccc}
\subfloat{\includegraphics[width = 7cm]{Chapters/SimulationEnv/Figs/VirtualEnvV1/TopView1.png}} &
\subfloat[Basic configuration.]{\includegraphics[width = 7cm]{Chapters/SimulationEnv/Figs/VirtualEnvV1/IsometricView1.png}} &
\subfloat{\includegraphics[width = 7cm]{Chapters/SimulationEnv/Figs/VirtualEnvV1/LowView1.png}}\\
%\subfloat[Here is my caption that is longer than my
%figure.]{\makebox[.45\textwidth]{\rule{1in}{1in}}}%
%\hfill
%2nd line of images
\subfloat{\includegraphics[width = 7cm]{Chapters/SimulationEnv/Figs/VirtualEnvV2/TopView.png}} &
\subfloat[Spline added for rail, basic foliage added and landscape texture improved.]{\includegraphics[width = 7cm]{Chapters/SimulationEnv/Figs/VirtualEnvV2/IsometricView.png}} &
\subfloat{\includegraphics[width = 7cm]{Chapters/SimulationEnv/Figs/VirtualEnvV2/LowView.png}}\\
%3rd line of images
\subfloat{\includegraphics[width = 7cm]{Chapters/SimulationEnv/Figs/VirtualEnvV3/TopView1.png}} &
\subfloat[Dirt track and smoke/steam effect added.]{\includegraphics[width = 7cm]{Chapters/SimulationEnv/Figs/VirtualEnvV3/IsometricView1.png}} &
\subfloat{\includegraphics[width = 7cm]{Chapters/SimulationEnv/Figs/VirtualEnvV3/LowView1.png}}
\end{tabular}
\caption{Evolution of the Simulation Environment. Each row depicts images from a subsequent iteration of the environment.}
\label{fig:virutalEnvDevelopment1}
\end{figure}

\begin{figure}
\pagebreak
\begin{tabular}{ccc}
%4thline of images
\subfloat{\includegraphics[width = 7cm]{Chapters/SimulationEnv/Figs/VirtualEnvV4/TopView2.png}} &
\subfloat[Splined wall and houses added.]{\includegraphics[width = 7cm]{Chapters/SimulationEnv/Figs/VirtualEnvV4/IsometricView2.png}} &
\subfloat{\includegraphics[width = 7cm]{Chapters/SimulationEnv/Figs/VirtualEnvV4/LowView2.png}}\\
%5thline of images
\subfloat{\includegraphics[width = 7cm]{Chapters/SimulationEnv/Figs/VirtualEnvV5/TopView.png}} &
\subfloat[Road and extra walls added.]{\includegraphics[width = 7cm]{Chapters/SimulationEnv/Figs/VirtualEnvV5/IsometricView.png}} &
\subfloat{\includegraphics[width = 7cm]{Chapters/SimulationEnv/Figs/VirtualEnvV5/LowView.png}}
\end{tabular}
\caption{Evolution of the Simulation Environment.
Each row depicts images from a subsequent iteration of the environment.}
\label{fig:virutalEnvDevelopment2}
\end{figure}
%\captionsetup[subfigure]{labelformat=default}
\end{landscape}

%\note{Might not be necessary to talk about all of these things}
\begin{landscape}
\begin{figure}
\centering
\begin{tabular}{cc}
\subfloat{\includegraphics[width = 10.7cm]{Chapters/SimulationEnv/Figs/VirtualEnvFinal/IsometricView1.png}} &
\subfloat{\includegraphics[width = 10.7cm]{Chapters/SimulationEnv/Figs/VirtualEnvFinal/LowView.png}} \\
\subfloat{\includegraphics[width = 10.7cm]{Chapters/SimulationEnv/Figs/VirtualEnvFinal/TopView2.png}} &
\subfloat{\includegraphics[width = 10.7cm]{Chapters/SimulationEnv/Figs/VirtualEnvFinal/BridgeView1.png}} \\
\end{tabular}
\caption{Images From Final Version of Simulation Environment}
\label{fig:finalVirtualEnv1}
\end{figure}

\begin{figure}
\begin{tabular}{cc}
\subfloat{\includegraphics[width = 10.7cm]{Chapters/SimulationEnv/Figs/VirtualEnvFinal/CloseUp1.png}} &
\subfloat{\includegraphics[width = 10.7cm]{Chapters/SimulationEnv/Figs/VirtualEnvFinal/CloseUp2.png}}
\end{tabular}
\caption{Images From Final Version of Simulation Environment}
\label{fig:finalVirtualEnv2}
\end{figure}
\end{landscape}
\pagebreak

\subsection{Techniques used to Improve Realism}\label{subsec:TechniquesImproveRealism}
\subsubsection{Blueprint Visual Scripting System}\label{subsubsec:blueprints}
UE4 provides much of its functionality through a visual scripting system known as \textit{Blueprints}. We provide a brief summary of Blueprints based on the UE4 \href{https://docs.unrealengine.com/en-US/Engine/Blueprints/index.html}{documentation page}\footnote{\href {https://docs.unrealengine.com/en-US/Engine/Blueprints/index.html}{https://docs.unrealengine.com/en-US/Engine/Blueprints/index.html}}. Blueprints provide a node-based interface to create gameplay elements. To develop different aspects of a game, the system provides a visual approach to scripting, and many of the tools available in standard written scripting languages are available, such as typed variables, arrays, structs, loops, etc. Blueprints were used extensively to develop the simulation.
%https://docs.unrealengine.com/en-US/Engine/Blueprints/index.html
\subsubsection{Materials}
%\note{grass, rail tracks}
%\subfloat[caption]{\includegraphics[width = 4.5cm]{Chapters/SimulationEnv/Figs/RailScenarioFirstIteration.png}} &
%\subfloat[caption]{\includegraphics[width = 4.5cm]{Chapters/SimulationEnv/Figs/VirtualEnvV1/resized_HighresScreenshot00001.png}} \\
\begin{wrapfigure}{r}{0.65\textwidth}
\centering
\includegraphics[width=0.65\textwidth]{Chapters/SimulationEnv/Figs/BlendedMaterialsVSNotBlendedMaterials/PoorTextures.png}
\caption{Materials used in the first iteration}
\label{fig:PoorTextures}
\includegraphics[width=0.65\textwidth]{Chapters/SimulationEnv/Figs/BlendedMaterialsVSNotBlendedMaterials/HighQualityMaterial.png}
\caption{Materials used in the final iteration}
\label{fig:GoodTextures}
%\caption{Contrast between initial and final materials used in simulation environment.}
\end{wrapfigure}
%https://docs.unrealengine.com/en-US/Engine/Rendering/Materials/IntroductionToMaterials/index.html
The details of creating materials can be found in the \href{https://docs.unrealengine.com/en-US/Engine/Rendering/Materials/IntroductionToMaterials/index.html}{UE4 Documentation}\footnote{\href {https://docs.unrealengine.com/en-US/Engine/Rendering/Materials/IntroductionToMaterials/index.html}{https://docs.unrealengine.com/en-US/Engine/Rendering/Materials/IntroductionToMaterials/index.html}}, which we use as a reference for the following paragraph. Materials in UE4 are made up of a number of components, which specify aspects such as colour, opacity, roughness, specularity and emissive colours. In order to produce realistic materials, it is necessary to blend and layer different textures, as well as to identify the correct parameters for surface normals and specularity, among other features. The UE4 editor is equipped with tools for modifying materials to achieve a high-fidelity output, which we accessed through the Blueprints interface. The landscape in the first iteration of the virtual environment consisted of a single uniform texture, with none of the above parameters specified. The result is shown in Figure \ref{fig:PoorTextures}. Subsequent versions used multiple layers and blending in order to create a higher-fidelity material, with the relevant parameters tuned. The final version is shown in Figure \ref{fig:GoodTextures}. Figure \ref{fig:LandscapeMaterialBlueprint} shows how a Blueprint was used to create the landscape material, where a Landscape Layer Blend node combines the individual textures.
\begin{figure}
\centering
\begin{tabular}{cc}
\subfloat[Layers blended into Landscape Layer Blend Node \label{fig:MaterialLayerBlend}]{\includegraphics[width=6cm]{Chapters/SimulationEnv/Figs/BlendedMaterialsVSNotBlendedMaterials/LayerBlend.PNG}} &
\subfloat[Blueprint used to create landscape material\label{fig:MaterialBlueprint}]{\includegraphics[width=8.5cm]{Chapters/SimulationEnv/Figs/BlendedMaterialsVSNotBlendedMaterials/LandscapeMaterial.PNG}}
\end{tabular}
\caption{Blueprints used to create the landscape}
\label{fig:LandscapeMaterialBlueprint}
\end{figure}
\pagebreak

\subsubsection{Splines}
\begin{wrapfigure}{r}{0.65\textwidth}
\centering
\includegraphics[width=0.65\textwidth]{Chapters/SimulationEnv/Figs/SplineVSNoSplineExamples/PerfectlyStraightRail.png}
\caption{Non-Splined rail section in first environment iteration}
\label{fig:StraightRail}
\includegraphics[width=0.65\textwidth]{Chapters/SimulationEnv/Figs/SplineVSNoSplineExamples/resized_SplineExample1.png}
\caption{Splined rail section in final environment iteration}\label{fig:SplinedRail}
\end{wrapfigure}
Uniformity tends to be rare in the real world; perfectly straight lines rarely occur naturally. For this reason, UE4 offers tools to create splines, along which the terrain can be deformed. We base our explanation on the official \href{https://docs.unrealengine.com/en-US/Engine/BlueprintSplines/index.html}{documentation page}\footnote{\href {https://docs.unrealengine.com/en-US/Engine/BlueprintSplines/index.html}{https://docs.unrealengine.com/en-US/Engine/BlueprintSplines/index.html}}. Splines are typically used to model roads and paths, but the Landscape Spline system is very flexible and can be used to model many different phenomena. In the initial stages of development, only a perfectly straight section of rail could be sourced for use in the environment, as shown in Figure \ref{fig:StraightRail}. The spline tool allowed for a much more realistic construction of the section of rail and the accompanying wall, as shown in Figure \ref{fig:SplinedRail}. It was also used to create the road and the bridge.
%\note{maybe provide link to docs}
%
\subsubsection{Foliage}
Similar to the argument made in relation to splines, it is rare to have uniformly configured foliage in the real world. To address this, the UE4 editor provides foliage generation and editing tools. From version 4.18 onward, the editor contains the \href{https://docs.unrealengine.com/en-US/Engine/OpenWorldTools/ProceduralFoliage/QuickStart/index.html}{Procedural Foliage Tool}\footnote{\href {https://docs.unrealengine.com/en-US/Engine/OpenWorldTools/ProceduralFoliage/QuickStart/index.html}{https://docs.unrealengine.com/en-US/Engine/OpenWorldTools/ProceduralFoliage/QuickStart/index.html}}, which is the most convenient way to add swathes of foliage to a scene. Since we were using other content dependent on UE 4.16, we opted to use the foliage painter tool, which allows the user to effectively paint foliage directly onto a landscape. It allows the user to specify a number of parameters to achieve the required density, scaling and other relevant features. The results of applying foliage to the scene are visible in Figures \ref{fig:Foliage3}, \ref{fig:Foliage4} and \ref{fig:Foliage5}.
%\begin{landscape}
%\begin{figure}
%\centering
%\begin{tabular}{ccc}
%\subfloat[Layers blended into Landscape Layer Blend Node]{\includegraphics[width=7cm]{Chapters/SimulationEnv/Figs/Foliage/Foliage2.png}}\label{fig:Foliage3} &
%\subfloat[Blueprint used to create landscape material]{\includegraphics[width=7cm]{Chapters/SimulationEnv/Figs/Foliage/Foliage5.png}}\label{fig:Foliage4} &
%\subfloat[Blueprint used to create landscape material]{\includegraphics[width=7cm]{Chapters/SimulationEnv/Figs/Foliage/Foliage6.png}}\label{fig:Foliage5}
%\end{tabular}
%\caption{Blueprints used to create realistic foliage}
%\label{fig:RealisticFoliageBlueprints}
%\end{figure}
\begin{figure}[H]
\centering
\begin{tabular}{c}
\subfloat[Foliage painted onto the landscape \label{fig:Foliage3}]{\includegraphics[width=12cm]{Chapters/SimulationEnv/Figs/Foliage/Foliage2.png}} \\
\subfloat[Trees and shrubs generated with non-uniform configuration \label{fig:Foliage4}]{\includegraphics[width=12cm]{Chapters/SimulationEnv/Figs/Foliage/Foliage5.png}}\\
%--------- This breaks the page and begins the last figure on a new page ---------%
\end{tabular}
\caption{Examples of foliage in the simulation environment}
\end{figure}
\clearpage
\begin{figure}[H]
\ContinuedFloat
\centering
\begin{tabular}{c}
%--------- This breaks the page and begins the last figure on a new page ---------%
\subfloat[Woodland area effect \label{fig:Foliage5}]{\includegraphics[width=12cm]{Chapters/SimulationEnv/Figs/Foliage/Foliage6.png}}
\end{tabular}
\caption{Examples of foliage in the simulation environment}
%\label{fig:RealisticFoliageBlueprints}
\end{figure}
%\begin{landscape}
%\end{landscape}

\subsubsection{Landscape Editing}
%\note{Discuss here how dirt track was created using}
%Creating a realistic landscape in UE4 serves a number of purposes.
To realistically capture the configuration of physical objects and their corresponding shadow and occlusion effects, it was necessary to model the terrain accurately. This was also desirable to allow the potential simulation of ground vehicles, so that difficulties that may be experienced in the real world, such as steep climbs or highly uneven surfaces, may be taken into account. UE4 provides a suite of landscaping tools that allows the user to create a highly variable landscape. The tools facilitate raising and flattening, smoothing, random noise and simulated erosion, as well as allowing for other more detailed modifications. These were used to create the railway embankment, the rutted path leading onto the rail tracks, and the riverbank and riverbed, shown in Figure \ref{fig:LandscapeEditing}.
\begin{figure}[H]
\centering
\begin{tabular}{c}
\subfloat[Rutted Track\label{fig:RuttedTrack}]{\includegraphics[width=11.6cm]{Chapters/SimulationEnv/Figs/LandscapedVSNotLandscaped/RuttedTrack.png}} \\
\subfloat[Rail embankment\label{fig:RailEmbankment}]{\includegraphics[width=11.6cm]{Chapters/SimulationEnv/Figs/LandscapedVSNotLandscaped/RailwayEmbankment.png}} \\
%\end{tabular}
%\end{figure}
%\clearpage
%\begin{figure}[H]
%\ContinuedFloat
%\centering
%\begin{tabular}{c}
\subfloat[Riverbank\label{fig:Riverbank}]{\includegraphics[width=11.6cm]{Chapters/SimulationEnv/Figs/LandscapedVSNotLandscaped/Bridge.png}}
\end{tabular}
\caption{Results of using the UE4 landscape editing tools}
\label{fig:LandscapeEditing}
\end{figure}

\subsubsection{Asset Sourcing}
%
An \href{https://docs.unrealengine.com/en-US/Engine/Basics/AssetsAndPackages/index.html}{\textit{asset}}\footnote{\href{https://docs.unrealengine.com/en-US/Engine/Basics/AssetsAndPackages/index.html}{https://docs.unrealengine.com/en-US/Engine/Basics/AssetsAndPackages/index.html}} can be described as a piece of content for an Unreal Engine project, which has been serialized to a file. Assets can be re-used and modified in the UE4 editor, but are usually created using external software. UE4 uses assets that come in the Filmbox (.fbx) format, which is a proprietary file format owned by Autodesk.
%\note{Do I need to reference this?}.
Conversion tools from other common asset file formats to fbx do exist, but results can vary. Due to limited funding, time and experience, we decided not to create assets from scratch, but rather used assets that were free to use. Searching for free assets was a labour-intensive process, which consisted of a number of steps:
\begin{enumerate}
    \item First, possible candidates for a particular type of asset (e.g. a train) were identified based on a search of asset stores that offer free assets. We mainly used \href{https://www.cgtrader.com/}{CGTrader}\footnote{\href {https://www.cgtrader.com/}{https://www.cgtrader.com/}}, \href{https://www.turbosquid.com/}{TurboSquid}\footnote{\href {https://www.turbosquid.com/}{https://www.turbosquid.com/}}, \href{https://3dwarehouse.sketchup.com/?hl=en}{3D Warehouse}\footnote{\href {https://3dwarehouse.sketchup.com/?hl=en}{https://3dwarehouse.sketchup.com/?hl=en}} and \href{https://www.shapenet.org/}{ShapeNet}\footnote{\href {https://www.shapenet.org/}{https://www.shapenet.org/}}.
    \item Once a potentially suitable asset had been identified based on its description and preview, it was downloaded in the Filmbox (fbx) format if possible. Otherwise, it was downloaded in whatever format was available.
    \item The asset was opened in Autodesk \note{add reference} and visually inspected for suitability. If the textures and geometry were not of a sufficient standard, the process was restarted.
    \item If the asset was deemed suitable from the inspection in Autodesk, then it was exported in Filmbox format.
    \item The asset was then imported into the UE4 editor. Problems often arose with scaling, incorrect texture mapping and one-sided materials applied to the wrong side of assets. These problems could sometimes be addressed; if not, we had to restart the process.
\end{enumerate}
%Talk about how well static world matches specification, how well rendered images perform for training some object detection etc.
%Also talk about how the environment was packaged and open-sourced with permissive licence for general use % Not sure of exact ordering here % 1st Iteration J:\Work\David\ROCSAFEMidTermDemo\Code\UnrealEngine\AirSim\Unreal\Environments\Blocks % 2nd Iteration J:\Work\David\UnrealEngineRocsafe\OS_01RadIntegr % 3rd Iteration D:\ROCSAFEScenarios\OS01TestTemp % 4th Iteration D:\ROCSAFEScenarios\OS_01Radiation - D:\ROCSAFEScenarios\OS_01Radiation\Saved\Screenshots\Windows screenshot 11 % 5th \\ROCSAFE2\ROCSAFEGroupShared\ROCSAFEUnrealEngineOperatingScenarios\NotIntegratedAirSim % V1: Brussels Demo % V2: Rail with spline, train, digger. No dirt track, no houses, no road, no wall, poor textures, poor foliage % V3: Add wall, better foliage % V4:and houses % V5: Proper foliage (stones) & blended textures % V6: Final version in shared folder
{ "alphanum_fraction": 0.8016536902, "avg_line_length": 87.5549295775, "ext": "tex", "hexsha": "f7adbf8f3c7516e7b51db8713624575f3fbe843c", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "754d975535e0da9a8e99cf31b651021698155c5b", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "DavidLSmyth/ResearchMScThesis", "max_forks_repo_path": "Chapters/SimulationEnv/SimulationEvironmentDesign.tex", "max_issues_count": 1, "max_issues_repo_head_hexsha": "754d975535e0da9a8e99cf31b651021698155c5b", "max_issues_repo_issues_event_max_datetime": "2019-06-18T11:59:42.000Z", "max_issues_repo_issues_event_min_datetime": "2019-06-18T11:59:42.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "DavidLSmyth/ResearchMScThesis", "max_issues_repo_path": "Chapters/SimulationEnv/SimulationEvironmentDesign.tex", "max_line_length": 1103, "max_stars_count": null, "max_stars_repo_head_hexsha": "754d975535e0da9a8e99cf31b651021698155c5b", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "DavidLSmyth/ResearchMScThesis", "max_stars_repo_path": "Chapters/SimulationEnv/SimulationEvironmentDesign.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 7420, "size": 31082 }
%!TEX root = ../thesis.tex \chapter{Introduction \label{ch:intro}}
{ "alphanum_fraction": 0.6811594203, "avg_line_length": 13.8, "ext": "tex", "hexsha": "c0f5fce0e45b6738706c0b4c6457ade5d256fcd6", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "17e09d7c28bc0c578e961d35d1c96411ece14bc4", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "tobinsouth/RandomResources", "max_forks_repo_path": "Writing/Latex/templates/UofA Thesis Template/chapter1/chapter.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "17e09d7c28bc0c578e961d35d1c96411ece14bc4", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "tobinsouth/RandomResources", "max_issues_repo_path": "Writing/Latex/templates/UofA Thesis Template/chapter1/chapter.tex", "max_line_length": 39, "max_stars_count": null, "max_stars_repo_head_hexsha": "17e09d7c28bc0c578e961d35d1c96411ece14bc4", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "tobinsouth/RandomResources", "max_stars_repo_path": "Writing/Latex/templates/UofA Thesis Template/chapter1/chapter.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 20, "size": 69 }
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Krishnakanth - One Page Two Column Resume % LaTeX Template % Version 1.0 (24/04/2021) % % IMPORTANT: THIS TEMPLATE NEEDS TO BE COMPILED WITH XeLaTeX % % This template uses several fonts not included with Windows/Linux by % default. If you get compilation errors saying a font is missing, find the line % on which the font is used and either change it to a font included with your % operating system or comment the line out to use the default font. % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TODO: % 1. Integrate biber/bibtex for article citation under publications. % 2. Figure out a smoother way for the document to flow onto the next page. % 3. Add styling information for a "Projects/Hacks" section. % 4. Add location/address information % 5. Merge OpenFont and MacFonts as a single sty with options. % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CHANGELOG: % v1.1: % 1. Fixed several compilation bugs with \renewcommand % 2. Got Open-source fonts (Windows/Linux support) % 3. Added Last Updated % 4. Move Title styling into .sty % 5. Commented .sty file. % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Known Issues: % 1. Overflows onto second page if any column's contents are more than the % vertical limit % 2. Hacky space on the first bullet point on the second column. % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \documentclass[]{kk-resume-openfont} \usepackage{fancyhdr} \pagestyle{fancy} \fancyhf{} \begin{document} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LAST UPDATED DATE % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \lastupdated %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TITLE NAME % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \namesection{Krishnakanth}{Yachareni}{ \urlstyle{same}\href{https://krishnakanthyachareni.github.io/personal-website/}{krishnakanth.com} | \href{https://www.linkedin.com/in/yacharenikrishnakanth/}{linkedin://yacharenikrishnakanth}\\ \href{mailto:[email protected]}{[email protected]} | 960.359.2104 } %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % COLUMN ONE % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{minipage}[t]{0.33\textwidth} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % EDUCATION %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Education} \subsection{Southern Mississippi} \descript{MS in Computer Science} \location{Present | Hattiesburg, MS} \sectionsep \subsection{JNTU Hyderabad} \descript{BS in Computer Science} \location{May 2017 | Hyderabad, India} College of Engineering \\ Manthani\\ \location{ GPA: 3.8 / 4.0} \sectionsep %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % LINKS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Links} LinkedIn:// \href{https://www.linkedin.com/in/yacharenikrishnakanth/}{\bf yacharenikrishnakanth} \\ Github:// \href{https://github.com/KrishnakanthYachareni}{\bf KrishnakanthYachareni} \\ StackOverflow:// \href{https://stackoverflow.com/users/6436720/yachareni-krishnakanth}{\bf yachareni} \\ Quora:// \href{https://www.quora.com/profile/Krishnakanth-Yachareni}{\bf Krishnakanth-Yachareni} \\ Facebook:// \href{https://www.facebook.com/yacharenikrishnakanth/}{\bf yacharenikrishnakanth} \\ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % COURSEWORK %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Coursework} \subsection{Graduate} Machine Learning \\ Advanced Algorithms \\ Cryptography \\ Advanced Robotic Systems \\ Parallel and Distributed Computing \\ \sectionsep \subsection{Undergraduate} Algorithms \& Data structures \\ Operating Systems \\ Compiler Design \\ Computer Networks \\ {\footnotesize \textit{\textbf{(Teaching Asst ) }}} \\ 
BigData Analytics \\

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% SKILLS
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Skills}
\subsection{Programming}
\location{\bf Languages: } Java8 \textbullet{} Python \textbullet{} JavaScript \textbullet{} Shell\\
\location{\bf Back End: } JavaEE \textbullet{} Spring Framework \textbullet{} MicroServices \textbullet{} REST/SOAP \textbullet{} Security\\
\location{\bf Front End:} Angular \textbullet{} ReactJs \textbullet{} HTML5 \textbullet{} CSS3 \textbullet{} Bootstrap \textbullet{} Velocity\\
\location{\bf Database:} SQL \textbullet{} NoSQL \textbullet{} PostgreSQL \textbullet{} Cassandra\\
\location{\bf Tools:} Docker \textbullet{} Kubernetes \textbullet{} Jenkins \textbullet{} Maven
\location{\bf Familiar:} NLP \textbullet{} CNN \textbullet{} TensorFlow \textbullet{} Selenium
\sectionsep

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% COLUMN TWO
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\end{minipage}
\hfill
\begin{minipage}[t]{0.66\textwidth}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% EXPERIENCE
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Experience}
\runsubsection{EPAM Systems}
\descript{| Software Development Engineer | Team Lead }
\location{Jan 2018 - June 2021 | Hyderabad, India}
\vspace{\topsep} % Hacky fix for awkward extra vertical space
\begin{tightemize}
\item Worked for a Mastercard client; my role involved the exploration \& integration of various real-time global payment services such as PayPal, Visa, Amex, NPCI, MADA and FirstData.
\item Developed the backend logic with a TDD approach and the frontend logic with a BDD approach, and wrote automation scripts in shell.
\item Worked with J2EE, Spring, Angular, SQL, NoSQL, private cloud and Docker.
\item Managed a team of 10 people and worked with global multi-disciplinary teams of engineers, architects, designers, product owners, and clients on a daily basis.
\end{tightemize}
\sectionsep

\runsubsection{Tata Consultancy Services}
\descript{| Assistant System Engineer}
\location{June 2017 – Jan 2018 | Hyderabad, India}
\begin{tightemize}
\item 1 out of 1200 applicants chosen to be a TCS Fellow 2017.
\item As a trainee, attended the initial phase of ILP training on OOP, Python and JavaEE.
\item Collaborated with a small team of student designers to develop a new mini banking web application before graduating from the ILP training.
\end{tightemize}
\sectionsep

\runsubsection{TASK/EXCITE}
\descript{| Software Engineering Intern }
\location{May 2016 – Aug 2016 | Hyderabad, India}
\begin{tightemize}
\item A four-month Summer Engineering Internship program organised by JNTUH in collaboration with TASK and HYSEA.
\item Developed a real-time IoT-based Android application for hostel searching and management, wherein an interactive platform is provided for non-local people \& hostel/mansion owners.
\item Tools used in the application include Android, Python, PHP, MySQL, Java, and Raspberry Pi.
\end{tightemize}
% \sectionsep

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% PROJECTS
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Projects}
\runsubsection{WhatsApp Chat Analysis}
\descript{| Personal}
\location{Jan 2018 – Mar 2018 | Pune, India}
Analyzes WhatsApp personal and group messages and shows complete statistics for chat owners; it also includes sentiment analysis.
\sectionsep

\runsubsection{Ostello}
\descript{| Head Undergraduate Researcher}
\location{April 2015 – July 2015 | Hyd, India}
The IoT-based Android application provides room searching and booking facilities, \& the vendor side supports the hostel management system with the help of a fingerprint scanner.
\sectionsep

\runsubsection{Programming Parser}
\descript{| Head Undergraduate Researcher}
\location{Jan 2017 – May 2017 | Hyd, India}
Led the development of \textbf{Anello}, a custom programming language specification that converts Anello programs into native Java programs which can be executed by a Java compiler. It reduces the amount of code that has to be written in Java.
\sectionsep

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% AWARDS
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Awards}
\begin{tabular}{rll}
2018 & top 1/500 & Best Software Engineer at EPAM Systems\\
2017 & 1\textsuperscript{st}/500 & TCS Coding Competition, Hyderabad\\
2016 & top 5/100 & Product Development Hackathon by TASK, HYSEA \\
2011 & top 5 & State Level Online Chess competition \\
\end{tabular}
\sectionsep

\end{minipage}
\end{document}
{ "alphanum_fraction": 0.660407462, "avg_line_length": 35.4260869565, "ext": "tex", "hexsha": "fbab3ce339c225725dd59801161aaec8e7fd4d83", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "e27ea15df18c464865c98736c9b7fdbe524e0826", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "KrishnakanthYachareni/latex-resume", "max_forks_repo_path": "kk_resume-openfont.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "e27ea15df18c464865c98736c9b7fdbe524e0826", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "KrishnakanthYachareni/latex-resume", "max_issues_repo_path": "kk_resume-openfont.tex", "max_line_length": 244, "max_stars_count": null, "max_stars_repo_head_hexsha": "e27ea15df18c464865c98736c9b7fdbe524e0826", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "KrishnakanthYachareni/latex-resume", "max_stars_repo_path": "kk_resume-openfont.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2027, "size": 8148 }
%!TEX root = ../chapter3.tex
%******************************
% Discussion
%*****************************
\section{Discussion}
The testes are among the most proliferative tissues in the adult body and ensure fertility via the continuous production of millions of sperm per day. Most developmental differentiation processes require the profiling of cellular populations at several time points \citep{Kernfeld2018, Scialdone2016, Wagner2018}. One of the exceptions is blood formation, where commitment to different lineages can be profiled at once \citep{Dahlin2018}. Similarly, spermatogenesis occurs in continuous waves throughout the reproductive life span of animals. At any given time point, all intermediate cell types that arise across the $\sim$35-day differentiation program are present in adult testes. This provided a powerful opportunity to capture an entire differentiation process by profiling the transcriptomes of thousands of single cells at a single time point. \\
We exploited the natural synchronisation of the first wave of spermatogenesis to identify key developmental transitions within the differentiation trajectory. In contrast, Chen \emph{et al.} (2018) sorted synchronised spermatocyte and spermatid populations after blocking spermatogenesis with WIN 18,446. This allowed a strict enrichment for cells in specific stages during spermatogenesis but lost the natural trajectory of this continuous differentiation process \citep{Chen2018}. Profiling spermatogenesis in juvenile animals also naturally enriched for rare cell types that are under-represented in adults. In the case of haematopoiesis, cells need to be sorted to capture otherwise under-represented cell types \citep{Dahlin2018}. Among these rare cell types, spermatogonia are of particular interest as these cells not only sustain male fertility, but are also the origin of the vast majority of testicular neoplasms \citep{Bosl1997}. We obtained more than 1,100 transcriptional profiles for spermatogonia, allowing the identification of specific cell clusters within this heterogeneous cell population, thus greatly improving the resolution over previous studies that only profiled adult testes \citep{Lukassen2018}. Furthermore, our approach also enriched for and facilitated characterisation of the complexity within testicular somatic cell types. Among those are characteristic immune cells and precursor cells that only exist until a few days after birth.\\
Droplet-based scRNA-Seq can profile large numbers of cells simultaneously \citep{Klein2015, Macosko2015, Zheng2017}, but often captures cells with a wide range of transcriptional complexity. Consequently, droplet-based assays present a major computational challenge in distinguishing between (i) droplets that contain transcriptionally inactive cells and (ii) empty droplets that contain (background) ambient RNA. By using a stringent default threshold, we identified the majority of somatic and germ cell types in testes, similar to recent single-cell expression studies in mouse and human \citep{Lukassen2018, Xia2018, Chen2018}. In addition, we applied a statistical method to identify cells from droplet-based data by comparing the ambient RNA profiles \citep{Lun2018}, and were able to identify transcriptionally inactive leptotene/zygotene spermatocytes.
This allowed us to bridge the developmental transition between spermatogonia and spermatocytes, thus providing a more complete view of the continuum of germ cell differentiation.\\
After the in-depth characterisation of germ and somatic cell types in adult testes, we profiled major developmental processes that occur during mouse spermatogenesis. During meiosis, we detect the expression of hundreds of genes associated with the developmental trajectory. Some of these genes show a sterility phenotype when perturbed, and we reason that this is also the case for the majority of genes whose expression follows the developmental trajectory. Spermiogenesis is characterised by wide-scale chromatin rearrangements, and we detect a clear increase in testis-specific histone variants, transition proteins and protamines during the late stages of sperm maturation. Again, genes that follow this trend could be important regulators that would cause sterility upon misexpression. \\
The transcriptional silencing of the sex chromosomes during meiosis and their subsequent partial re-activation post-meiosis is essential for male fertility \citep{Mahadevaiah2008}. Failure of \gls{MSCI} results in the expression of spermatocyte-lethal genes, as demonstrated for two Y chromosome-encoded genes: \gls{Zfy} 1 and 2 \citep{Royo2010}. Our discovery that H3K9me3 is enriched during meiosis at spermatid-specific genes suggests a stronger, targeted repression in spermatocytes for a key subset of X-linked genes. The deposition of H3K9me3 is specific to MSCI in males, and is not observed during general meiotic silencing of unpaired chromosomes \citep{Cloutier2016, Taketo2013, Turner2004a}. Our finding that spermatid-specific genes are particularly enriched for H3K9me3 in spermatocytes suggests that their repression may be necessary for male fertility. \\
When profiling changes in variability over the differentiation trajectory, I detected a strong confounding effect between the variability measure and the correlation between expression and pseudotime. Therefore, new measures of variability need to be derived to account for this dependency. For example, graph-based measures can assign a variability estimate to each cell by comparing expression across a local neighbourhood. A generalized linear model relating these variability estimates to the ordering of cells along pseudotime can then be fitted to detect changes in variability. Nevertheless, confounding effects such as the expression level can obstruct such an analysis.
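A minimal sketch of this graph-based idea, assuming a cells-by-genes expression matrix and a pseudotime ordering are already available, could look as follows; all variable names and the simulated data are hypothetical, and the Gaussian family is only a simple default.

\begin{verbatim}
import numpy as np
import statsmodels.api as sm
from sklearn.neighbors import NearestNeighbors

def variability_along_pseudotime(expr, pseudotime, k=30):
    """expr: cells x genes expression matrix; pseudotime: one value per cell."""
    # Build a k-nearest-neighbour graph on the expression profiles.
    nn = NearestNeighbors(n_neighbors=k).fit(expr)
    _, idx = nn.kneighbors(expr)

    # Per-cell variability: mean gene-wise variance across the local neighbourhood.
    local_var = np.array([expr[neigh].var(axis=0).mean() for neigh in idx])

    # Regress the variability estimates on pseudotime; a Gamma family with a log
    # link may be more appropriate for strictly positive variability values.
    design = sm.add_constant(pseudotime)
    fit = sm.GLM(local_var, design, family=sm.families.Gaussian()).fit()
    return fit  # fit.params and fit.pvalues indicate trends in variability

# Hypothetical usage on simulated data.
rng = np.random.default_rng(0)
expr = rng.gamma(2.0, 1.0, size=(500, 200))
pseudotime = np.sort(rng.uniform(0.0, 1.0, size=500))
print(variability_along_pseudotime(expr, pseudotime, k=20).params)
\end{verbatim}

Whether such a model truly separates changes in variability from changes in mean expression would still need to be checked, for example by regressing out the local mean expression before fitting.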
{ "alphanum_fraction": 0.8203297637, "avg_line_length": 120.0612244898, "ext": "tex", "hexsha": "d3dcff955141d6b990a89bb0d68b2f236c94f5e4", "lang": "TeX", "max_forks_count": 4, "max_forks_repo_forks_event_max_datetime": "2020-05-07T18:32:52.000Z", "max_forks_repo_forks_event_min_datetime": "2020-04-22T16:28:49.000Z", "max_forks_repo_head_hexsha": "20bf4e22748cd4649bedcf91a6fb39caf07d1053", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "nilseling/Thesis", "max_forks_repo_path": "Chapter4/Chapt4_files/discussion.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "20bf4e22748cd4649bedcf91a6fb39caf07d1053", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "nilseling/Thesis", "max_issues_repo_path": "Chapter4/Chapt4_files/discussion.tex", "max_line_length": 281, "max_stars_count": 8, "max_stars_repo_head_hexsha": "20bf4e22748cd4649bedcf91a6fb39caf07d1053", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "nilseling/Thesis", "max_stars_repo_path": "Chapter4/Chapt4_files/discussion.tex", "max_stars_repo_stars_event_max_datetime": "2021-11-10T09:18:54.000Z", "max_stars_repo_stars_event_min_datetime": "2019-03-15T19:34:41.000Z", "num_tokens": 1213, "size": 5883 }
% Options for packages loaded elsewhere \PassOptionsToPackage{unicode}{hyperref} \PassOptionsToPackage{hyphens}{url} \PassOptionsToPackage{dvipsnames,svgnames*,x11names*}{xcolor} % \documentclass[ fontsize=13pt, english, a4paper, openany, a4paper, oneside]{book} \usepackage{amsmath,amssymb} \usepackage{lmodern} \usepackage{setspace} \usepackage{ifxetex,ifluatex} \ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex \usepackage[T1]{fontenc} \usepackage[utf8]{inputenc} \usepackage{textcomp} % provide euro and other symbols \else % if luatex or xetex \usepackage{unicode-math} \defaultfontfeatures{Scale=MatchLowercase} \defaultfontfeatures[\rmfamily]{Ligatures=TeX,Scale=1} \fi % Use upquote if available, for straight quotes in verbatim environments \IfFileExists{upquote.sty}{\usepackage{upquote}}{} \IfFileExists{microtype.sty}{% use microtype if available \usepackage[]{microtype} \UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts }{} \makeatletter \@ifundefined{KOMAClassName}{% if non-KOMA class \IfFileExists{parskip.sty}{% \usepackage{parskip} }{% else \setlength{\parindent}{0pt} \setlength{\parskip}{6pt plus 2pt minus 1pt}} }{% if KOMA class \KOMAoptions{parskip=half}} \makeatother \usepackage{xcolor} \IfFileExists{xurl.sty}{\usepackage{xurl}}{} % add URL line breaks if available \IfFileExists{bookmark.sty}{\usepackage{bookmark}}{\usepackage{hyperref}} \hypersetup{ pdftitle={Open Collaboration Guide}, pdfauthor={Daniel Antal}, pdflang={en}, colorlinks=true, linkcolor=blue, filecolor=Maroon, citecolor=Blue, urlcolor=blue, pdfcreator={LaTeX via pandoc}} \urlstyle{same} % disable monospaced font for URLs \usepackage[left=3cm, right=3cm, top=2.5cm, bottom=2.5cm]{geometry} \usepackage{color} \usepackage{fancyvrb} \newcommand{\VerbBar}{|} \newcommand{\VERB}{\Verb[commandchars=\\\{\}]} \DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}} % Add ',fontsize=\small' for more characters per line \usepackage{framed} \definecolor{shadecolor}{RGB}{248,248,248} \newenvironment{Shaded}{\begin{snugshade}}{\end{snugshade}} \newcommand{\AlertTok}[1]{\textcolor[rgb]{0.94,0.16,0.16}{#1}} \newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} \newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.77,0.63,0.00}{#1}} \newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}} \newcommand{\BuiltInTok}[1]{#1} \newcommand{\CharTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} \newcommand{\CommentTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}} \newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} \newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} \newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}} \newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{#1}} \newcommand{\DecValTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}} \newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} \newcommand{\ErrorTok}[1]{\textcolor[rgb]{0.64,0.00,0.00}{\textbf{#1}}} \newcommand{\ExtensionTok}[1]{#1} \newcommand{\FloatTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}} \newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} \newcommand{\ImportTok}[1]{#1} \newcommand{\InformationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} \newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}} \newcommand{\NormalTok}[1]{#1} \newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.81,0.36,0.00}{\textbf{#1}}} 
\newcommand{\OtherTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{#1}} \newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}} \newcommand{\RegionMarkerTok}[1]{#1} \newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} \newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} \newcommand{\StringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} \newcommand{\VariableTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} \newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} \newcommand{\WarningTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} \usepackage{longtable,booktabs,array} \usepackage{calc} % for calculating minipage widths % Correct order of tables after \paragraph or \subparagraph \usepackage{etoolbox} \makeatletter \patchcmd\longtable{\par}{\if@noskipsec\mbox{}\fi\par}{}{} \makeatother % Allow footnotes in longtable head/foot \IfFileExists{footnotehyper.sty}{\usepackage{footnotehyper}}{\usepackage{footnote}} \makesavenoteenv{longtable} \setlength{\emergencystretch}{3em} % prevent overfull lines \providecommand{\tightlist}{% \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} \setcounter{secnumdepth}{5} \usepackage{booktabs} \usepackage{amsthm} \makeatletter \def\thm@space@setup{% \thm@preskip=8pt plus 2pt minus 4pt \thm@postskip=\thm@preskip } \makeatother \ifxetex % Load polyglossia as late as possible: uses bidi with RTL langages (e.g. Hebrew, Arabic) \usepackage{polyglossia} \setmainlanguage[]{english} \else \usepackage[main=english]{babel} % get rid of language-specific shorthands (see #6817): \let\LanguageShortHands\languageshorthands \def\languageshorthands#1{} \fi \ifluatex \usepackage{selnolig} % disable illegal ligatures \fi \usepackage[]{natbib} \bibliographystyle{apalike} \title{Open Collaboration Guide} \author{Daniel Antal} \date{2021-04-16} \begin{document} \maketitle { \hypersetup{linkcolor=} \setcounter{tocdepth}{1} \tableofcontents } \setstretch{1.1} \hypertarget{welcome}{% \chapter*{Welcome}\label{welcome}} \addcontentsline{toc}{chapter}{Welcome} Placeholder \hypertarget{intro}{% \chapter{Introduction}\label{intro}} Placeholder \hypertarget{definitions}{% \section{Definitions}\label{definitions}} \hypertarget{pledge}{% \section{Code of Conduct}\label{pledge}} \hypertarget{collaboration-tools}{% \section{Collaboration Tools}\label{collaboration-tools}} \hypertarget{keybase}{% \subsection{Instant messaging: Keybase}\label{keybase}} \hypertarget{github}{% \subsection{Git \& Github}\label{github}} \hypertarget{rstudio-ide-other-ide}{% \subsection{Rstudio IDE \& other IDE}\label{rstudio-ide-other-ide}} \hypertarget{simple-intro}{% \section{Simple Introduction}\label{simple-intro}} \hypertarget{markdown}{% \subsection{Markdown}\label{markdown}} \hypertarget{inspiration}{% \section{Inspiration \& Recommended Reading}\label{inspiration}} \hypertarget{books}{% \subsection{Books}\label{books}} \hypertarget{why-weapons-of-math-destruction}{% \subsubsection{Why: Weapons of Math Destruction}\label{why-weapons-of-math-destruction}} \hypertarget{how-metadata}{% \subsubsection{How: Metadata}\label{how-metadata}} \hypertarget{critically-data-feminism}{% \subsubsection{Critically: Data Feminism}\label{critically-data-feminism}} \hypertarget{blogposts}{% \subsection{Blog posts \& Podcasts}\label{blogposts}} \hypertarget{data-curation}{% \chapter{Data Curation}\label{data-curation}} Placeholder \hypertarget{eu-datathlon}{% \section{EU Datathlon 2021}\label{eu-datathlon}} \hypertarget{observatories}{% 
\section{Observatories}\label{observatories}}
\hypertarget{meta-observatory}{%
\section{Meta Observatory}\label{meta-observatory}}
\hypertarget{emo}{%
\subsection{European Music Observatory}\label{emo}}
\hypertarget{strengths}{%
\section{Strengths}\label{strengths}}
\hypertarget{acquisition}{%
\chapter{Data Acquisition}\label{acquisition}}
Whatever the source of the data we use, we never trust it fully. We need to check its strengths and weaknesses, and bring it into a complete, documented and tidy form.
\hypertarget{collect-metadata}{%
\section{Metadata}\label{collect-metadata}}
Metadata plays an important role in finding whatever we have acquired and in using it properly. It is equally important during the storage of data in our \protect\hyperlink{data-storage}{databases} and in \protect\hyperlink{documentation}{documentation and publication}, too. We placed it into a \protect\hyperlink{metadata}{separate chapter}.
\hypertarget{eurostat}{%
\section{Eurostat}\label{eurostat}}
\hypertarget{harmonized-survey-programs}{%
\section{Harmonized Survey Programs}\label{harmonized-survey-programs}}
\hypertarget{music-apis}{%
\section{Music APIs}\label{music-apis}}
\hypertarget{spotify-api}{%
\subsection{Spotify API}\label{spotify-api}}
We collect the following data from the Spotify API.
\hypertarget{bandcamp}{%
\subsection{Bandcamp}\label{bandcamp}}
\hypertarget{data-storage}{%
\chapter{Data Storage \& Databases}\label{data-storage}}
Placeholder
\hypertarget{storage-metadata}{%
\section{Metadata}\label{storage-metadata}}
\hypertarget{dta-raw}{%
\section{Raw data assets}\label{dta-raw}}
\hypertarget{processed-individual-data}{%
\section{Processed, individual data}\label{processed-individual-data}}
\hypertarget{indicators---statistically-processed-data}{%
\section{Indicators - statistically processed data}\label{indicators---statistically-processed-data}}
\hypertarget{periodic-data-releases}{%
\section{Periodic data releases}\label{periodic-data-releases}}
\hypertarget{interactive-data-releases}{%
\section{Interactive data releases}\label{interactive-data-releases}}
\hypertarget{continous-data-releases-in-api}{%
\section{Continuous data releases in API}\label{continous-data-releases-in-api}}
\hypertarget{applications}{%
\chapter{Applications}\label{applications}}
Placeholder
\hypertarget{naming-conventions}{%
\section{File, Variable Names, Value Labels}\label{naming-conventions}}
\hypertarget{path}{%
\subsection{Path}\label{path}}
\hypertarget{R-guide}{%
\subsubsection{R language}\label{R-guide}}
\hypertarget{variable-nameing-styles}{%
\subsection{Variable naming styles}\label{variable-nameing-styles}}
\hypertarget{character-coding}{%
\subsection{Character coding}\label{character-coding}}
\hypertarget{statistical-software}{%
\section{Statistical Processing \& Indicators}\label{statistical-software}}
\hypertarget{retroharmonize}{%
\subsection{retroharmonize}\label{retroharmonize}}
\hypertarget{regions}{%
\subsection{regions}\label{regions}}
\hypertarget{iotables}{%
\subsection{iotables}\label{iotables}}
\hypertarget{machine-learning}{%
\section{Machine Learning Applications}\label{machine-learning}}
\hypertarget{listen-local}{%
\subsection{Listen Local app}\label{listen-local}}
\hypertarget{bandcamp-librarian}{%
\subsection{Bandcamp Librarian app}\label{bandcamp-librarian}}
\hypertarget{maps}{%
\section{Maps}\label{maps}}
\hypertarget{documentation}{%
\chapter{Documentation And Publications}\label{documentation}}
\hypertarget{bibliography}{%
\section{Citations, Bibliography}\label{bibliography}}
For any external
literature or data source, please store the citation information in well-formatted, thematic \texttt{.bib} files. We programmatically create \texttt{.bib} citation files for our data products, software releases, and published documents.
\begin{Shaded}
\begin{Highlighting}[]
\SpecialCharTok{@}\NormalTok{Manual\{R}\SpecialCharTok{{-}}\NormalTok{regions,}
\NormalTok{ title }\OtherTok{=}\NormalTok{ \{regions}\SpecialCharTok{:}\NormalTok{ Processing Regional Statistics\},}
\NormalTok{ author }\OtherTok{=}\NormalTok{ \{Daniel Antal\},}
\NormalTok{ note }\OtherTok{=}\NormalTok{ \{R package version }\DecValTok{0}\NormalTok{.}\FloatTok{1.6}\NormalTok{\},}
\NormalTok{ url }\OtherTok{=}\NormalTok{ \{https}\SpecialCharTok{:}\ErrorTok{//}\NormalTok{regions.danielantal.eu}\SpecialCharTok{/}\NormalTok{\},}
\NormalTok{ year }\OtherTok{=}\NormalTok{ \{}\DecValTok{2021}\NormalTok{\},}
\NormalTok{\}}
\SpecialCharTok{@}\NormalTok{Manual\{R}\SpecialCharTok{{-}}\NormalTok{retroharmonize,}
\NormalTok{ title }\OtherTok{=}\NormalTok{ \{retroharmonize}\SpecialCharTok{:}\NormalTok{ Ex Post Survey Data Harmonization\},}
\NormalTok{ author }\OtherTok{=}\NormalTok{ \{Daniel Antal\},}
\NormalTok{ note }\OtherTok{=}\NormalTok{ \{R package version }\DecValTok{0}\NormalTok{.}\FloatTok{1.15}\NormalTok{\},}
\NormalTok{ url }\OtherTok{=}\NormalTok{ \{https}\SpecialCharTok{:}\ErrorTok{//}\NormalTok{retroharmonize.dataobservatory.eu}\SpecialCharTok{/}\NormalTok{\},}
\NormalTok{ year }\OtherTok{=}\NormalTok{ \{}\DecValTok{2021}\NormalTok{\},}
\NormalTok{\}}
\end{Highlighting}
\end{Shaded}
Marking up a reference to \citep{R-retroharmonize} with \texttt{{[}@R-retroharmonize{]}} will place the citation in your selected format (here APA) into the text, and into the \protect\hyperlink{references}{references} at the end of this documentation.
\textbf{We would love to cite your work}. Please create a well-formatted \texttt{.bib} file with your publications about the best use of data, creation of data, scientific analysis of data, etc., and save it to this repo as \texttt{name-surname.bib}. Make sure that each \texttt{.bib} entry contains at least the \texttt{title}, \texttt{author} and \texttt{year} fields.
\hypertarget{software-releases}{%
\section{Software Releases}\label{software-releases}}
We have released three software products so far.
\begin{itemize}
\tightlist
\item The aim of \href{https://reprex.nl/software/retroharmonize/}{retroharmonize} is to provide tools for reproducible retrospective (ex-post) harmonization of datasets that contain variables measuring the same concepts but coded in different ways.
\item The \href{https://reprex.nl/software/regions/}{regions} package is an offspring of the eurostat package on \href{https://github.com/rOpenGov}{rOpenGov}. It started as a tool to validate and re-code regional Eurostat statistics, but it aims to be a general solution for all sub-national statistics. It will be developed in parallel with other rOpenGov packages.
\item \href{https://reprex.nl/software/iotables/}{iotables} processes all the symmetric input-output tables of the EU member states, and calculates direct, indirect and induced effects, as well as multipliers for GVA, employment and taxation. These are important inputs into policy evaluation, business forecasting, or granting/development indicator design. iotables is used by about 800 experts around the world.
\end{itemize}
They are released on CRAN, and they follow the CRAN guidelines for unit-testing and human review.
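The \texttt{@Manual} entries shown in the Citations section above do not have to be written by hand. As a minimal R sketch (an illustration only, assuming the packages are installed locally), the base \texttt{citation()} and \texttt{toBibtex()} utilities, or \texttt{knitr::write\_bib()}, can regenerate them from the installed package metadata:
\begin{verbatim}
# Regenerate a BibTeX entry for one installed package
bib <- utils::citation("regions")      # citation metadata from the package
cat(utils::toBibtex(bib), sep = "\n")  # print it as an @Manual entry

# Write entries for several packages at once into a thematic file,
# e.g. a file such as packages.bib
knitr::write_bib(c("regions", "retroharmonize", "iotables"),
                 file = "packages.bib")
\end{verbatim}
Whether this is wired into the build of this guide is a project decision; the sketch only illustrates that the \texttt{.bib} entries for the released packages can be kept in sync with the installed versions.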
As CRAN relies on extensive automated testing, the formatting standard is \emph{very strict} both for the software code and its documentation. The slightest deviation results in rejection.
\hypertarget{documentation-metadata}{%
\section{Metadata}\label{documentation-metadata}}
Metadata plays an important role in our \protect\hyperlink{data-storage}{databases} and in \protect\hyperlink{documentation}{documentation}, too. We placed it into a \protect\hyperlink{metadata}{separate chapter}.
\hypertarget{data-release}{%
\section{Data releases}\label{data-release}}
We currently work with two platforms, and we must maintain compatibility with data repositories that our clients use. Both provide some validation and version control, and assign a standard \textbf{doi} to our releases.
\hypertarget{dataverse}{%
\subsection{Dataverse}\label{dataverse}}
\href{https://dataverse.org/}{dataverse.org/} has a well-supported API, and this is the choice of our first academic partner, IViR. The Dataverse Project is being developed at Harvard's Institute for Quantitative Social Science (IQSS), along with many collaborators and contributors worldwide. The Dataverse Project was built on our experience with our earlier Virtual Data Center (VDC) project, which spanned 1997-2006 as a collaboration between the Harvard-MIT Data Center (now part of IQSS) and the Harvard University Library. Precursors to the VDC date to 1987, comprising such entities as pre-web software to automatically transfer cataloging information by FTP to other sites across campus at designated times, and before that to a stand-alone software guide to local data.
\hypertarget{zenodo}{%
\subsection{Zenodo}\label{zenodo}}
\href{https://zenodo.org/}{zenodo} is the choice of the European Union, and it is likely that in the future all EU-financed research data must be published here. The OpenAIRE project, in the vanguard of the open access and open data movements in Europe, was commissioned by the EC to support their nascent Open Data policy by providing a catch-all repository for EC funded research. CERN, an OpenAIRE partner and pioneer in open source, open access and open data, provided this capability and Zenodo was launched in May 2013. In support of its research programme CERN has developed tools for Big Data management and extended Digital Library capabilities for Open Data. Through Zenodo these Big Science tools could be effectively shared with the long-tail of research.
\hypertarget{publications}{%
\section{Publications}\label{publications}}
Our work is often embedded in publications. We want to make the application of various formatting guidelines as painless as possible. This is one of the main motivations for using markdown and Rmarkdown to document our work. There are more and more conversion tools that automatically convert our longform documentation from markdown or Rmarkdown to the formatting standards of almost any scientific publisher.
\hypertarget{namespace}{%
\section{Namespace}\label{namespace}}
\hypertarget{naming-variables-objects-features}{%
\subsection{Naming variables, objects, features}\label{naming-variables-objects-features}}
In our program code and databases, we follow Open Science naming guidelines. While there are many versions of naming guidelines, we must pick something that works for everyone and adhere to it. For ease of programmatic use, we should follow a lowercase, snake\_case variable naming style.
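As an illustrative sketch (assuming the \texttt{snakecase} and \texttt{dplyr} packages are available; the exact tooling is a project choice), the conversion to this canonical form can be automated rather than done by hand:
\begin{verbatim}
library(snakecase)  # to_snake_case()
library(dplyr)      # select helpers: starts_with(), ends_with(), contains()

original <- c("This Variable", "VarName", "My_Var", "that_variable")
to_snake_case(original)
# "this_variable" "var_name" "my_var" "that_variable"

# With canonical names, group selections stay simple, e.g. on a
# hypothetical 'artists' table:
# artists %>% select(starts_with("spotify"), ends_with("_id"))
\end{verbatim}
The tables below show the canonical form for a few typical name variants.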
\begin{tabular}{l|l}
\hline
original & canonical\\
\hline
This Variable & this\_variable\\
\hline
X1 & x\_1\\
\hline
VarName & var\_name\\
\hline
My\_Var & my\_var\\
\hline
that\_variable & that\_variable\\
\hline
\end{tabular}
Variable names should run from the general to the specific whenever possible, or at least make group selections such as \texttt{starts\_with()} or \texttt{ends\_with()} easy to use, because \texttt{contains()} can be ambiguous.
\begin{tabular}{l|l}
\hline
original & canonical\\
\hline
spotify\_artist\_id & spotify\_artist\_id\\
\hline
deezer\_id\_artist & deezer\_id\_artist\\
\hline
id\_on\_bandcamp\_track & id\_on\_bandcamp\_track\\
\hline
artist\_bandcamp\_id & artist\_bandcamp\_id\\
\hline
gender\_artist & gender\_artist\\
\hline
\end{tabular}
\begin{itemize}
\tightlist
\item \texttt{contains("artist")} selects all artist features
\item \texttt{ends\_with("\_id")} selects all IDs. Because \texttt{id} is short, it may appear in words like \texttt{india}. But \texttt{\_id} is unique, particularly if it is the end of a variable name.
\item \texttt{starts\_with("spotify")} selects all features related to Spotify, and \texttt{starts\_with("spotify")\ \&\ contains("\_rec\_")} selects all recording features.
\end{itemize}
\hypertarget{metadata}{%
\chapter{Metadata}\label{metadata}}
We follow the metadata definition and concepts of \href{https://mitpress.mit.edu/books/metadata}{Metadata} from the MIT Press Essential Knowledge series. It is not, Pomerantz tells us, just ``data about data.'' It is a means by which the complexity of an object is represented in a simpler form. For example, the title, the author, and the cover art are metadata about a book. When metadata does its job well, it fades into the background; everyone (except perhaps the NSA) takes it for granted. Pomerantz explains what metadata is, and why it exists. He distinguishes among different types of metadata---descriptive, administrative, structural, preservation, and use---and examines different users and uses of each type. He discusses the technologies that make modern metadata possible, and he speculates about metadata's future. By the end of the book, readers will see metadata everywhere. Because, Pomerantz warns us, it's metadata's world, and we are just living in it.
\hypertarget{covenant}{%
\chapter{Contributor Covenant Code of Conduct}\label{covenant}}
Placeholder
\hypertarget{our-pledge}{%
\section{Our Pledge}\label{our-pledge}}
\hypertarget{our-standards}{%
\section{Our Standards}\label{our-standards}}
\hypertarget{enforcement-responsibilities}{%
\section{Enforcement Responsibilities}\label{enforcement-responsibilities}}
\hypertarget{scope}{%
\section{Scope}\label{scope}}
\hypertarget{enforcement}{%
\section{Enforcement}\label{enforcement}}
\hypertarget{enforcement-guidelines}{%
\section{Enforcement Guidelines}\label{enforcement-guidelines}}
\hypertarget{correction}{%
\subsection{1. Correction}\label{correction}}
\hypertarget{warning}{%
\subsection{2.
Warning}\label{warning}} \hypertarget{temporary-ban}{% \subsection{3. Temporary Ban}\label{temporary-ban}} \hypertarget{permanent-ban}{% \subsection{4. Permanent Ban}\label{permanent-ban}} \hypertarget{attribution}{% \section{Attribution}\label{attribution}} \hypertarget{appendix}{% \chapter{Appendix}\label{appendix}} Placeholder \hypertarget{markdown-1}{% \section{Markdown}\label{markdown-1}} \hypertarget{bookdown}{% \section{Bookdown}\label{bookdown}} \hypertarget{tinytex}{% \section{Tinytex}\label{tinytex}} \bibliography{book.bib,packages.bib,antal.bib,ccipolicy.bib,datascience.bib} \end{document}
{ "alphanum_fraction": 0.7659846547, "avg_line_length": 40.029250457, "ext": "tex", "hexsha": "f516bbabab8a557ae9d65dfcc5a9ce038121012d", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-04-19T08:48:56.000Z", "max_forks_repo_forks_event_min_datetime": "2021-04-19T08:48:56.000Z", "max_forks_repo_head_hexsha": "01afecbbb4dea18daed900d50d93d011d153f734", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "dataobservatory-eu/collaboration_guide", "max_forks_repo_path": "reprex_collaboration_guide.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "01afecbbb4dea18daed900d50d93d011d153f734", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "dataobservatory-eu/collaboration_guide", "max_issues_repo_path": "reprex_collaboration_guide.tex", "max_line_length": 661, "max_stars_count": null, "max_stars_repo_head_hexsha": "01afecbbb4dea18daed900d50d93d011d153f734", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "dataobservatory-eu/collaboration_guide", "max_stars_repo_path": "reprex_collaboration_guide.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 6373, "size": 21896 }
\subsection{Exponential}
\noindent Let $a$ be a constant.\\
By the definitions of the Laplace transform and an improper integral,
\begin{align*}
    \Laplace{e^{at}} &= \lim\limits_{n\to\infty}{\int_{0}^{n}{e^{at}e^{-st}\mathrm{d}t}} \\
    &= \lim\limits_{n\to\infty}{\int_{0}^{n}{e^{(a-s)t} \mathrm{d}t}} \\
    &= \frac{1}{a-s}\lim\limits_{n\to\infty}{\left[e^{(a-s)t}\right]}_{0}^{n} \text{, } s \neq a \\
    &= \frac{1}{a-s}\lim\limits_{n\to\infty}{\left(e^{(a-s)n} - 1\right)} \text{, } s \neq a \\
    &= \begin{cases}
        \frac{1}{s-a} & s > a \\
        \text{DNE} & s \leq a
    \end{cases}.
\end{align*}
So,
\begin{equation*}
    \Laplace{e^{at}} = \frac{1}{s-a} \text{, } s > a.
\end{equation*}
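As a quick check, setting $a = 0$ recovers the transform of the constant function $f(t) = 1$,
\begin{equation*}
    \Laplace{1} = \frac{1}{s - 0} = \frac{1}{s} \text{, } s > 0,
\end{equation*}
which agrees with the standard table value.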
{ "alphanum_fraction": 0.5533428165, "avg_line_length": 39.0555555556, "ext": "tex", "hexsha": "fad841c74e66ad87eb83b5531f2395a8db4d3c96", "lang": "TeX", "max_forks_count": 10, "max_forks_repo_forks_event_max_datetime": "2021-08-17T15:21:12.000Z", "max_forks_repo_forks_event_min_datetime": "2020-04-10T05:41:17.000Z", "max_forks_repo_head_hexsha": "20a0efd79057a1f54e093b5021fbc616aab78c3f", "max_forks_repo_licenses": [ "Unlicense" ], "max_forks_repo_name": "aneziac/Math-Summaries", "max_forks_repo_path": "diffEq/laplaceTransforms/derivations/exponential.tex", "max_issues_count": 26, "max_issues_repo_head_hexsha": "20a0efd79057a1f54e093b5021fbc616aab78c3f", "max_issues_repo_issues_event_max_datetime": "2021-10-07T04:47:03.000Z", "max_issues_repo_issues_event_min_datetime": "2020-03-28T17:44:18.000Z", "max_issues_repo_licenses": [ "Unlicense" ], "max_issues_repo_name": "aneziac/Math-Summaries", "max_issues_repo_path": "diffEq/laplaceTransforms/derivations/exponential.tex", "max_line_length": 98, "max_stars_count": 39, "max_stars_repo_head_hexsha": "20a0efd79057a1f54e093b5021fbc616aab78c3f", "max_stars_repo_licenses": [ "Unlicense" ], "max_stars_repo_name": "aneziac/Math-Summaries", "max_stars_repo_path": "diffEq/laplaceTransforms/derivations/exponential.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-17T17:38:45.000Z", "max_stars_repo_stars_event_min_datetime": "2020-03-26T06:20:36.000Z", "num_tokens": 316, "size": 703 }
%\documentclass[a4paper,9pt,fleqn,notoc]{diss}
%% \renewcommand{\includegraphics}[1][1]{}
%\begin{document}
\chapter{Construction Grammar with FCG}
\label{s:fcg}
Construction Grammar posits that linguistic knowledge is organized in the form of ``constructions'' \citep{goldberg1995constructions,croft2001radical}\oldindex{Goldberg, A.}\oldindex{Croft, W.} which are mappings of semantics and pragmatics to syntax, i.e. words and grammar, but also phonology, prosody or intonation. Typically, Construction grammarians take a functional view of language and analyze every piece of language as a tool for communication and in terms of the syntactic and semantic function it performs. The theoretical framework of Construction Grammar is important for this book, because it integrates semantics with syntax and opens up ways for understanding the acquisition and evolution of language as a tool for solving communicative problems, in which all elements of processing, from semantics to syntax, can be put to use. Every part of an utterance has meaning and a semantic function. The meaning of a lexical item is the category, prototype or concept that it refers to. Its function is how it is used both in the semantic structure underlying the phrase and in the syntactic structure of the phrase. The following two examples include the word \textit{rot} (`red') but they use the word in completely different syntactic and semantic structures.
\ea\label{ex:4:1}
der rote Block\\
\glt `the red block'\\
\z
\ea\label{ex:4:2}
Rot ist eine Farbe\\
\glt `red is a color'\\
\z
% \begin{enumerate}
% \item \textit{der rote Block} (`the red block')
% \item \textit{Rot ist eine Farbe} (`red is a color')
% \end{enumerate}
In \REF{ex:4:1} \textit{rot} (`red') is used to modify the set of objects denoted by the word \textit{Block} (`block'), whereas in \REF{ex:4:2} the statement is about the color itself. We can precisely capture these differences in semantic function using cognitive operations and IRL (a structure for \REF{ex:4:1} can be found in \sectref{s:irl}). The semantic function is coupled to a particular expression in syntax. In \REF{ex:4:1} the color is expressed as an adjective, which signals its use as a modifier. In \REF{ex:4:2} the color is expressed as a noun, which signals that the subsequent verb phrase is a fact about the color itself. In production, the speaker can therefore choose to express the category as an adjective if the category is linked to the corresponding cognitive operation (e.g. {\footnotesize\tt apply-color}). In parsing, when the hearer observes a color adjective, he can infer that he is supposed to modify a set of objects using that operation. Which set of objects the color adjective modifies is determined by the larger syntactic and semantic context. For instance, in \REF{ex:4:1} the adjective is part of an adjective noun phrase that indicates which set is modified by the color category, namely the set of blocks. From the viewpoint of the adjective noun phrase, the adjective has the semantic function of providing a modifier and, in particular, of modifying the set of objects denoted by the noun. Of course, other adjectives, such as spatial adjectives, can have the same function within an adjective noun phrase. The modified set is then input to another operation, namely {\footnotesize\tt apply-selector}, which is marked by the determiner.
So what we can see already in these simple examples are mappings from semantics to syntax and back, where every aspect of syntax, i.e. words and grammatical relations, has a specific effect on the semantic interpretation of the phrase. Vice versa, the speaker can use all the potential of syntax to communicate precise semantic distinctions that he wants to convey. The key item for analysis is the function of items both in syntax and semantics. These dependencies between syntax and semantics can be easily operationalized using FCG\is{Fluid Construction Grammar} \citep{beule2005hierarchy,steels2005linking}\oldindex{De Beule, J.}\oldindex{Steels, L.}\oldindex{Neubauer, N.}. Throughout this book language processing is implemented in FCG, a computational implementation of Construction Grammar. FCG is (1) a formalism that provides a notation for specifying constructions, (2) an engine that processes linguistic structure by applying constructions, in order to produce utterances or parse meanings, and (3) a set of design principles for organizing the grammar and linking grammar to representations of semantics, in particular, to semantic structure formalized using IRL.
\section{Linguistic processing}
Linguistic processing encompasses both production and parsing of utterances. In production, FCG starts from a conceptualized meaning and tries to translate as much as possible of the semantic structure conceptualized by IRL into syntactic structure, i.e. words and grammatical relations, using constructions in the linguistic inventory. In parsing this process is reversed and the construction inventory is used to recover as much semantic structure from an utterance as possible. Processing is organized around the \textsc{transient structure}\is{transient structure} which acts as a blackboard representing the current state of processing. Constructions work like rules -- if a construction is applicable, i.e. if the conditions for its application are met, the construction can change the transient structure. Over time the transient structure accumulates information provided by the different constructions that have applied, until some end state is reached, for instance when no construction can apply anymore.
\begin{figure}
\begin{center}
\includegraphics[width=.8\textwidth]{figs/simple-grammar-initial}
\end{center}
\caption[Initial transient structure]{\is{transient structure!initial}Initial transient structure which contains only the meaning to be expressed in the top-unit of the semantic pole (left). There is no hierarchy yet and the syntactic pole (right) is empty.}
\label{f:initial-structure}
\end{figure}
\begin{sidewaysfigure}
\begin{center}
\includegraphics[width=\textwidth]{figs/simple-grammar-final}
\end{center}
\caption[Final transient structure]{\is{transient structure!final}Final transient structure after many constructions of a simplified German grammar have been applied. The structure consists of units which are hierarchically organized starting from the top-unit. The meaning to be expressed is distributed over various units on the semantic side. Units feature semantic and syntactic categorization ({\footnotesize\tt sem-cat} and {\footnotesize\tt syn-cat}) which was built up in processing to organize constituent structure and allow for high-level constructions to abstract from individual items.
On the syntactic side units have {\footnotesize\tt form} features consisting of strings providing words and, so called ``meets constraints'' which introduce word order.} \label{f:final-structure} \end{sidewaysfigure} \subsection{Transient structure}\is{transient structure} The transient structure\is{transient structure} has two poles: a semantic and a syntactic pole. Information regarding meaning is accumulated on the semantic side, information about words and grammatical relations are gathered on the syntactic side. Information is organized into units identified by a {\footnotesize\tt unit-name}. Units consist of {\footnotesize\tt attribute-value} pairs. In order to represent constituent structure, units can form hierarchies in which some units are hierarchically linked to other units effectively building tree like structures. In the beginning of processing the transient structure\is{transient structure} is filled with information either from the conceptualization processes, e.g. in production, or from the utterance observed, e.g. in parsing. Subsequently, constructions change the transient structure by adding new units, introducing hierarchy, changing the value of attributes or by introducing new attributes. Figures \ref{f:initial-structure} and \ref{f:final-structure} show the transition from an initial transient structure\is{transient structure} which only contains a single unit, called ``top-unit'' on each side to a final transient structure with hierarchical organization of units and many more features. The initial structure only contains a meaning on the semantic side. The final structure contains, among other things, strings and syntactic word order constraints which can be used to build an utterance, a process called \textsc{rendering}. Figures~\ref{f:initial-structure} and \ref{f:final-structure} show graphical representations of the list representation (s-expression) used in processing. The following restates the initial transient structure\is{transient structure} as s-expression. % \begin{footnotesize} \begin{lstlisting} ((top (meaning ((apply-selector -?topic -?set-31 -?sel-1) (bind selector -?sel-1 unique) (apply-color-category -?set-31 -?set-12 -?color) (bind color-category -?color red) (apply-class -?set-12 -?ctx-2 -?class) (bind object-class -?class block))))) <--> ((top)) \end{lstlisting} % \end{footnotesize} The top shows the semantic pole. The bottom, after the {\footnotesize\verb|<-->|}, shows the syntactic pole. Both poles have one unit (the top-unit). On the semantic side the top-unit has one attribute, the meaning attribute which has an IRL-network in list form as its value. The following shows the final structure in the same representational format. % \begin{footnotesize} \begin{lstlisting} (... (color-adjective-unit-44 (meaning ((apply-color-category -?set-31 -?set-12 -?color))) (sem-subunits (red-unit-40)) (sem-cat ((sem-fn (modifier)))) (args ((ref -?set-31) (src -?set-12)))) (red-unit-40 (meaning ((bind color-category -?color red))) (sem-cat ((type (color-category)))) (args ((ref -?color)))) ... (top (sem-subunits (determined-noun-phrase-77)))) <--> (... (color-adjective-unit-44 (syn-subunits (red-unit-40)) (syn-cat ((syn-fn (adjectival))))) (red-unit-40 (form ((string ?str-284 "rote"))) (syn-cat ((lex-cat (color-adjective))))) ... 
(top (syn-subunits (determined-noun-phrase-77))))
\end{lstlisting}
% \end{footnotesize}
Only parts of the complete final structure are shown, in particular three units on each pole: {\footnotesize\tt red-unit-40}, {\footnotesize\tt color-adjective-unit-44} and {\footnotesize\tt top}. In contrast to the initial transient structure, the meaning is distributed across different units. Notice that which unit is a subunit of another is coded by a special attribute called {\footnotesize\tt syn-subunits} on the syntactic pole and {\footnotesize\tt sem-subunits} on the semantic pole. Compare this with \figref{f:after-red-cxn} which shows the hierarchy in the final\enlargethispage{1\baselineskip} structure. For example, {\footnotesize\tt red-unit-40} is a subunit of {\footnotesize\tt color-adjective-unit-44}.
\begin{figure}
\begin{center}
\includegraphics[width=0.75\textwidth]{figs/high-level-cxn-application}
\end{center}
\caption[Construction application]{This figure shows a schematic view of construction application (Figure adapted from \citealt{steels2011design}\oldindex{Steels, L.}). Starting from the initial transient structure\is{transient structure} (left), all constructions in the set of constructions are tested to see whether they match the structure. Two constructions match the initial transient structure. If a construction matches, it can merge new information. Construction 72 adds unit C. After the structure has been changed, the process continues and all constructions are checked to see whether they can merge with the transient structure\is{transient structure} modified by construction 72. Because construction 72 has applied, the transient structure is in a state such that construction 2 can now apply. This was previously not the case. Construction 2 depends on information provided by construction 72. Subsequently, construction 2 further changes the transient structure, and so on and so forth. Often multiple constructions from the set of constructions can apply. For example, construction 3 could also change the initial transient structure\is{transient structure}. This poses a general problem in processing, which is solved by using a search algorithm described later in this section.}
\label{f:cxn-application}
\end{figure}
\subsection{Constructions}\is{construction}
Constructions are organized in the same way as transient structures\is{transient structure}. They consist of two poles and the data in each pole are organized in terms of units, attributes and values. FCG supports bi-directional constructions, which means that the same construction is used in production and parsing. The difference between production and parsing is how the syntactic and semantic pole of a construction is used in each case. In production the semantic pole is used to check the applicability of the construction. In parsing the syntactic pole is used. Applicability of a construction is checked using a mechanism called \textsc{matching}.\is{matching} Matching is based on the well-studied concept of \textsc{unification}, which \is{unification} is a computational process for equating two terms, in this case the semantic or syntactic pole of the construction with the corresponding pole of the transient structure. If matching succeeds, the construction can change both poles of the transient structure\is{transient structure}, a process called \textsc{merge}, because it fuses information.
The precise inner workings of these two fundamental operations are described in \citet{steels2006fcg}\oldindex{Steels, L.}\oldindex{De Beule, J.}. The most important fact is that matching in FCG mainly relies on variables, which in FCG (and in IRL) start with {\footnotesize\tt ?}. In computational terms, constructions specify (1) under which conditions they apply and (2) if they apply, how the structure should be changed.
\begin{figure}
\begin{center}
\includegraphics[width=\textwidth]{figs/lex-rot-cxn}
\end{center}
\caption[Schematic representation of a construction]{Schematic representation of a construction. The two poles of the construction are shown. The top shows the tagged and matching parts of the construction. The bottom shows the hierarchy building part of the construction.}
\label{f:lex-rot-cxn}
\end{figure}
\figref{f:lex-rot-cxn} shows an example of a lexical construction that maps the color category {\footnotesize\tt red} onto the string \textit{rote} (`red') (\figref{f:after-red-cxn} shows what happens when this construction is applied to the initial transient structure\is{transient structure!initial}). The following shows the low-level list representation of the construction schematically depicted in \figref{f:lex-rot-cxn}.
% \begin{example}
\ea\label{e:red-lex-list}
% \begin{footnotesize}
% \begin{verbatim}[commandchars=\\\{\}]
\begin{lstlisting}
((?top-6143 (tag ?meaning-2381 (meaning (== (bind color-category ?red-57 red)))))
 ((j ?red-unit-158 ?top-6143) ?meaning-2381
  (sem-cat ((type (color-category))))
  (args ((ref ?red-57)))))
<-->
((?top-6143 (tag ?string-251 (form (== (string ?str-251 "rote")))))
 ((j ?red-unit-158 ?top-6143) ?string-251
  (syn-cat ((lex-cat (color-adjective))))))
\end{lstlisting}\z
% \end{footnotesize}
% \end{example}
The top displays the semantic pole, followed by the syntactic pole after the {\footnotesize\verb|<-->|}. In production the construction requires\is{production} the meaning {\footnotesize\tt (bind color-category ?red red)} to be present. If this is the case, the construction merges the information on the syntactic side, in particular the stem, into the transient structure\is{transient structure}. Additionally, this construction builds hierarchy. It introduces a new unit which is a subunit of the top-unit and which is used to collect information for this particular lexical item. Already this simple construction uses the basic ways in which constructions interact with the transient structure\is{transient structure}:
\begin{description}
\item[Variables and matching] Constructions inevitably contain many variables. Even the unit names in the transient structure\is{transient structure} change every time a new utterance is parsed or a new meaning is produced. To give another example, the variables in the meaning that link cognitive operations are different every time IRL conceptualizes. Using a variable in one part of the construction and repeating it in another can lead to changes in the transient structure triggered by matching and merging \citep{steels2006fcg}\oldindex{Steels, L.}\oldindex{De Beule, J.}. \REF{e:red-lex-list}, for instance, uses matching and merging by re-using the meaning variable {\footnotesize\tt ?red-57} in the {\footnotesize\tt args} attribute. Whatever this variable binds to in processing, the recurring variable makes sure that the data is available in both places.
\item[Hierarchy] Hierarchy is built using a special operator called the ``J-operator'', which changes the transient structure to include a new unit \citep{beule2005hierarchy}\oldindex{De Beule, J.}\oldindex{Steels, L.}. The new unit can have units that are already present in the transient structure as children. A construction can therefore easily change the hierarchical structure of the complete pole. The J-operator syntax is:
%\begin{example*}
\ea\label{e:j-op-syntax}
% \begin{footnotesize}
% \begin{Verbatim}[commandchars=\\\{\}]
\begin{lstlisting}
((J ?new-unit ?parent (?child-1 .. ?child-n))
 (new-attribute new-attribute-value))
\end{lstlisting}\z
% \end{Verbatim}
% \end{footnotesize}
%\end{example*}
In the example construction the J-operator is used on the semantic and on the syntactic side. It introduces new units on both sides and adds information to these units (in \figref{f:lex-rot-cxn} the parts pertaining to the J-operator are shown below the dotted line). Notice that the new units on both sides have the same name.
\item[Movement] Constructions can \emph{tag} attributes and their values in order to move them around. In this example construction, the tag-operator moves the bind statement pertaining to the color category from the top-unit to the newly created unit. The tag operator takes the following form:
% \begin{footnotesize}
% \begin{Verbatim}[commandchars=\\\{\}]
\begin{lstlisting}
(?unit
 (tag ?tag-variable (attribute attribute-value)))
\end{lstlisting}
% \end{Verbatim}
% \end{footnotesize}
The operator binds whatever follows the variable {\footnotesize\tt ?tag-variable} to the variable. If the variable is used in a J-unit, i.e. a unit with a J-operator, in another part of the construction, this denotes the place where {\footnotesize\tt (attribute attribute-value)} will be moved to. The example construction has {\footnotesize\tt tag} operators on the semantic side for moving the bind statement to the new semantic unit. Similarly, on the syntactic side the operator is used to move the string \textit{rote} to the new syntactic unit.
\end{description}
\begin{figure}
\begin{center}
\includegraphics[width=\textwidth]{figs/simple-grammar-after-red-application}
\end{center}
\caption[Transient structure after the lexical construction applied]{%
Transient structure after the lexical construction has applied. The construction has introduced two new units using the J-operator: one on the semantic side and one on the syntactic side. Both units have the same name, {\footnotesize\tt red-unit-42}. The construction introduced the string \textit{rote} on the syntactic side, and the bind statement used for triggering the construction has been moved using the tag-operator from the top-unit to the new semantic subunit. The construction also added new semantic and syntactic categories ({\footnotesize\tt sem-cat} and {\footnotesize\tt syn-cat}) that can be used by subsequent constructions.}
\label{f:after-red-cxn}
\end{figure}
\subsection{Search}
Constructions are organized in a pool of constructions. In principle, constructions compete for access to the transient structure in processing. More than one construction can typically apply to the transient structure, and the question is how to organize the process if there are multiple constructions that want to change the transient structure. In the absence of a~priori rules to prefer one construction over another, each construction that can apply to the transient structure\is{transient structure} is tried in a different branch of a heuristically guided search process.
In other words, instead of having competing constructions change the same transient structure, the structure is copied and each potentially applying construction is applied to its own copy without necessarily influencing the others. Naturally, this leads to different branches in processing, in which each branch computes a particular parsing or production result. Search is represented using a search tree in which each node contains a transient structure. The initial node contains the initial transient structure. Leaf nodes contain final structures. The search process itself can be manipulated. For instance, it is possible to remove duplicate nodes which contain the same transient structure\is{transient structure} and to refrain from processing them, and the order in which a particular branch is followed can be influenced by how successful one predicts the branch to be. \figref{f:fcg-search} shows an example search tree for production of the utterance \textit{der rote Block}.
\begin{figure}
\begin{center}
\includegraphics[width=1.0\columnwidth]{figs/der-rote-block-fcg-search}
\end{center}
\caption[FCG search tree in production]{FCG search tree which produces \textit{der rote Block} given the IRL-network shown in \figref{f:the-red-block-network}.}
\label{f:fcg-search}
\end{figure}
\subsection{Design layer}
In order to design grammars, it has proven beneficial to abstract from the low-level processing layer of FCG and add a representational layer that connects high-level linguistic analysis with the processing engine of FCG. The idea is to allow recurring problems in grammar design to be solved using \textsc{templates} -- without having to resort to copying the code needed for describing a construction in the basic list notation. Templates are a general mechanism for expressing \textsc{design patterns}\is{design pattern}, i.e. solutions that can be re-used to deal with the same problem occurring in different situations. For instance, all grammars implement phrasal constructions. One of the main semantic functions of phrasal constructions is to introduce variable equalities for linking constituents. A template encapsulates the solution to the problem of linking constituents in such a way that the solution can be re-used in other phrasal constructions of the same grammar, but ideally also for phrasal constructions in other grammars. Templates are defined similarly to functions. They have a name and a set of arguments which are specific to the template.
% \begin{example}
\ea\label{e:template-syntax}
% \begin{footnotesize}
% \begin{Verbatim}[commandchars=\\\{\}]
% Our Typewriter font has no italics, so no \emph for you :(
\begin{lstlisting}[columns=fixed]
(template-name construction-name
  :argument-1 value-1
  :argument-2 value-2
  ...
  :argument-n value-n)
\end{lstlisting}\z
% \end{footnotesize}
% \end{example}
Let us consider an example. I redefine the lexical construction introduced earlier, using a template called {\footnotesize\tt def-lex-skeleton}.
% \begin{example}
\ea\label{e:def-lex-rot}
% \begin{footnotesize}
% \begin{Verbatim}[commandchars=\\\{\}]
\begin{lstlisting}
(def-lex-skeleton red-cxn
  :meaning (== (bind color-category ?cat red))
  :args ((ref ?cat))
  :string "rote")
\end{lstlisting}\z
% \end{Verbatim}
% \end{footnotesize}
% \end{example}
If this template is executed, it translates into the low-level list representation in \REF{e:red-lex-list}.
\section{Open-ended language evolution with FCG}
Besides the obvious requirement of a computational formalism for linguistic processing in computational experiments, FCG has a number of features that make it an optimal choice for studies in language evolution. FCG is not fixed to a certain set of constructions, a particular grammar layout, a particular set of meanings, or even a particular set of semantic and syntactic categories. FCG solely provides dedicated mechanisms for processing language but makes no actual claims about how a particular phenomenon should be processed in language.
% Most importantly it makes no claims about the
% layout of the constructions designed to process
% a particular phenomenon.
This allows different solutions to be explored by grammar designers. But, most importantly, it allows artificial agents to invent different constructions for solving a particular problem in communication, track their success and adapt them until the agents have conventionalized a solution to their particular problem.
% This is a point
% of utmost importance.
Language is not a fixed system, but rather a system negotiated by its users to reach communicative goals in a decentralized manner. The fact that there are different solutions to the same problem therefore requires formalisms that are designed to be open to changes in syntactic and semantic categorization, and to evolving meaning structure and new constructions. FCG is such a formalism. From a computational perspective, FCG provides an easily manipulatable data representation, which makes inventing new constructions, changing and adapting semantic and syntactic categories, and introducing hierarchy or movement of data relatively easy. As in Construction Grammar, the \emph{unified} nature of representation is important. There is absolutely no difference in terms of representation and processing between lexical, functional or phrasal constructions. Hence, FCG supports research into how constructions can change from lexical to grammatical constructions, which is of interest for the study of the influence of grammaticalization processes on language evolution \citep{traugott1991grammaticalization}\oldindex{Heine, B.}\oldindex{Traugott, E. C.}. Another important argument for the use of FCG is its robust behavior in parsing and production. The search process\is{parsing} for construction application and the bi-directional nature of constructions allow agents to produce as much of the meaning as they can when they are the speaker. In parsing, the same process allows agents to recover as much of the semantics of a phrase as they possibly can. This is a prerequisite for any kind of grounded language learning, let alone language evolution. Agents have to get as much information as possible from the different systems, such as perception and conceptualization, but also language processing. If agents had to deal with a grammar engine that essentially gives up on processing as soon as an agent encounters a phrase that he thinks is unconventional, learning the new unconventional phrase could never occur or would be significantly hindered. If, on the other hand, the grammar engine provides as much information as possible, agents have a much better shot at guessing the underlying meaning and making sense of what was conveyed to them. Subsequently, they can better represent the new parts of an utterance versus parts they already know. Modeling this whole process as a search process is an immense advantage of FCG.
Agents can track what changes when they apply other constructions and explore different possible parsing and production results, in order to identify problems in language processing. This last point, the advantage of keeping information from the search process that governs linguistic processing, is important in particular for the main problem studied in this book: conventionalization. In order for agents to realize that constructions are competing for the same string, the same grammatical structure or the same meaning, it is vital to fully explore the search process. If there are multiple ways of producing an utterance for a meaning, for instance because there are multiple words to express the same category, then the search can recover all of them. Together with a mechanism for tracking the success of constructions, the search can then choose the best one. After the interaction, the agent can update the constructions used and those that he could have used, for instance by rewarding successfully used constructions and punishing unsuccessful or unused constructions. Constructions are equipped with a score that allows agents to update their inventories by scoring constructions according to their success in communication. If scores get too low, agents can remove the affected constructions.
\section{Discussion}
There is no question that this is a short, in many ways too short, introduction to FCG. FCG has been under continuous development since 1998 and it has developed into a mature system which allows researchers to study complex language phenomena such as Russian aspect \citep{gerasymova2010acquisition,gerasymova2012temporal}\oldindex{Gerasymova, K.}\oldindex{Spranger, M.}. The complexity of natural language has without doubt left its mark on the system, and many design choices in the system are not immediately obvious unless one takes the scope of the research program into account. Recently, several book projects \citep{steels2011design,steels2012computational}\oldindex{Steels, L.} have attempted to communicate the full scope of FCG research performed in the last decade. The interested reader is referred to these efforts for a broader introduction.
%\bibliographystyle{diss}
%\bibliography{papers,space}
%\end{document}
{ "alphanum_fraction": 0.7934804413, "avg_line_length": 50.016722408, "ext": "tex", "hexsha": "cbb5ea16877e2656a6e5970bcca86e07ec3317ea", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "7056f2405d68066a7b0ba5d0891f327b09c005e3", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "langsci/Spranger", "max_forks_repo_path": "chapters/4-background-fcg.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "7056f2405d68066a7b0ba5d0891f327b09c005e3", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "langsci/Spranger", "max_issues_repo_path": "chapters/4-background-fcg.tex", "max_line_length": 149, "max_stars_count": null, "max_stars_repo_head_hexsha": "7056f2405d68066a7b0ba5d0891f327b09c005e3", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "langsci/Spranger", "max_stars_repo_path": "chapters/4-background-fcg.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 6948, "size": 29910 }
\input{../head.tex} \begin{document} \pagetitle{RITlug Server Use Policy}{Updated April 26, 2014} \section{Access Privileges} \begin{subroutines} \item Members of the Executive Board and those deemed trustworthy by the Executive Board may hold access to elevated privileges (e.g. sudo). \item Those with access to use elevated privileges shall not use their access to violate the privacy of other members without probable cause as decided by either the Executive Board or the Institute. \item Those with access to elevated privileges may modify or move server configurations or data as necessary, without violating any other policies. \item Accounts may be requested from the Executive Board and will be created at the Executive Board's discretion. \end{subroutines} \section{Installation of Software} \begin{subroutines} \item Any installation of system-available software must be approved by a member of the Executive Board. \item Installation of software which must be run with elevated privileges must be approved by a majority of the Executive Board. \item The Executive Board shall decide the appropriate requirements for the documentation of newly installed software. \item If software is to be installed on one of RITlug's servers but not all of them, the newly introduced inconsistency must be documented and approved by unanimous vote of the Executive Board. \end{subroutines} \section{Resource Use} \subsection{Storage} \begin{subroutines} \item Users may use storage space to store content as long as it does not violate club or Institute policies. \item Users may not use an excessive amount of storage space, as decided by the Executive Board. \item Users who store self-produced content or code shall retain the rights of said content and do not grant RITlug any rights for its use unless otherwise stated. \end{subroutines} \subsection{CPU/Memory} \begin{subroutines} \item Users may use the RITlug servers for their own research, compilation, and arbitrary tasks, but should not do anything that may require a substantial amount of resources without first obtaining permission from the Executive Board. \item Users should monitor their running software and tasks so that problems can be promptly resolved if they occur. \end{subroutines} \subsection{Network} \begin{subroutines} \item Users must not consume an excessive amount of network bandwidth. \item Users shall not use the RITlug servers to access content which is in violation of institute or club policy, even if said content is not placed on permanent storage. \item Users shall not use the RITlug servers to proxy activity which is in violation of institute or club policy on servers or networks. \item Users who consume enough network bandwidth to substantially disrupt the availability of the RITlug servers on RIT's network shall be given one strike after each instance of disruption. After the third strike, the user in violation will be given a notice of at least 24 hours to back up their data, after which their account will be deleted and their privileges will be revoked. \end{subroutines} \section{Service Use} \subsection{Task Scheduling} \begin{subroutines} \item Users may not create scheduled system tasks without obtaining permission from the Executive Board. \item Users may schedule their own personal tasks, as long as said tasks do not consume an excessive amount of resources and follow all given policies. 
\end{subroutines}
\subsection{Servers}
\begin{subroutines}
\item Users may publicly serve content through the Internet as long as doing so does not violate any other policies.
\item Users may not publicly serve content for which they do not own the proper permissions and copyrights.
\item Users shall not use the servers to publicise content that the Executive Board deems offensive or otherwise in bad taste.
\item The Executive Board may vote to remove, revoke, or disallow server use, content distribution, or content storage at any time.
\end{subroutines}
\section{Virtual Machine Use}
\begin{subroutines}
\item Virtual machines are governed by the previously stated policies.
\item The owner of a virtual machine shall have root access to their virtual machine.
\item The Executive Board may vote to remove any virtual machine at their discretion if they feel that it violates any of the above policies.
\end{subroutines}
\section{Removing Accounts/Data}
\begin{subroutines}
\item If data which is scheduled to be removed is not deemed overly offensive or in violation of Institute policy, the owner of said data shall be given an advance warning of at least 24 hours to retrieve or back up their data before it is removed.
\item The above policy also applies to removed accounts.
\item Accounts and data shall not be removed without first consulting the Executive Board or an official from the Institute.
\end{subroutines}
\end{document}
{ "alphanum_fraction": 0.808584925, "avg_line_length": 62.4230769231, "ext": "tex", "hexsha": "dcb6ae9acda7a55cdef798546ac9caff7fa7fb96", "lang": "TeX", "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2020-05-19T20:59:27.000Z", "max_forks_repo_forks_event_min_datetime": "2016-02-08T16:55:56.000Z", "max_forks_repo_head_hexsha": "b21320cd4a9d0d5fbcd5568a5fda156ae99dffa7", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "RITlug/governance", "max_forks_repo_path": "policy/server_use_policy.tex", "max_issues_count": 2, "max_issues_repo_head_hexsha": "b21320cd4a9d0d5fbcd5568a5fda156ae99dffa7", "max_issues_repo_issues_event_max_datetime": "2018-03-24T17:44:23.000Z", "max_issues_repo_issues_event_min_datetime": "2017-10-21T18:42:42.000Z", "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "RITlug/governance", "max_issues_repo_path": "policy/server_use_policy.tex", "max_line_length": 383, "max_stars_count": 2, "max_stars_repo_head_hexsha": "b21320cd4a9d0d5fbcd5568a5fda156ae99dffa7", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "RITlug/governance", "max_stars_repo_path": "policy/server_use_policy.tex", "max_stars_repo_stars_event_max_datetime": "2018-10-22T18:58:15.000Z", "max_stars_repo_stars_event_min_datetime": "2016-05-06T22:02:29.000Z", "num_tokens": 1013, "size": 4869 }
\cleardoublepage
\renewcommand{\papertext}{Paper 08: Reanalysis of a 10-year record (2004--2013) of seasonal mass balances at Langenferner/Vedretta Lunga, Ortler Alps, Italy}
\section*{\papertext}
\addcontentsline{toc}{section}{\protect\numberline{}\papertext}%
\label{paper_08}
\vspace{0.5cm}
\begin{singlespace}
\begin{hangparas}{1em}{1}
Galos, S. P., Klug, C., \textbf{Maussion, F.}, Covi, F., Nicholson, L., Rieg, L., Gurgiser, W., Mölg, T. and Kaser, G.: Reanalysis of a 10-year record (2004--2013) of seasonal mass balances at Langenferner/Vedretta Lunga, Ortler Alps, Italy, Cryosph., 11(3), 1417--1439, \href{https://doi.org/10.5194/tc-11-1417-2017}{doi:10.5194/tc-11-1417-2017}, 2017.
\end{hangparas}
\end{singlespace}
\vspace{0.5cm}
Measurements of mass balance are the backbone of any glacier modelling effort. “Traditional” measurements have been conducted for decades and form the longest available time series of glacier mass balance. They are conducted by local authorities and scientists, then curated and provided by the World Glacier Monitoring Service (\href{https://wgms.ch}{WGMS}). OGGM (and many other glacier models) rely heavily on these observations for model calibration and validation, and potential biases and errors will have consequences carried over to the model projections. It is therefore very important to ensure the quality of the provided mass balances, and also to revisit (“re-analyse”) previous measurements in the light of modern knowledge and methods. In this study led by Stephan Galos (at that time a PhD student at the University of Innsbruck), we use a combination of surface energy balance modelling and partial observations to reconstruct and homogenize past mass balance time series. We show that the reconstructed data can differ substantially from the original estimates, and we discuss the importance of careful planning of mass balance measurement campaigns. I contributed to this paper by providing guidance on the statistical analysis, and I conducted the bootstrap uncertainty analysis, helping to quantify the uncertainty involved in interpolating ablation stake data to the glacier scale. I am also regularly involved in fieldwork and field courses for our master students on this particular glacier (Langenferner).
\href{https://doi.org/10.5194/tc-11-1417-2017}{Link to the paper} (open-access)
\iflong
\includepdf[pages=-,openright]{./papers/paper_08.pdf}
\else
\fi
{ "alphanum_fraction": 0.7865306122, "avg_line_length": 50, "ext": "tex", "hexsha": "ae00cd71e11c1949133434838b1cbba7d40bafdd", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "5133e6dfa7d192c3e82e2ea6c15438d1d3194a90", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "fmaussion/habil2.0", "max_forks_repo_path": "tex/chapters/paper_08.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "5133e6dfa7d192c3e82e2ea6c15438d1d3194a90", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "fmaussion/habil2.0", "max_issues_repo_path": "tex/chapters/paper_08.tex", "max_line_length": 353, "max_stars_count": null, "max_stars_repo_head_hexsha": "5133e6dfa7d192c3e82e2ea6c15438d1d3194a90", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "fmaussion/habil2.0", "max_stars_repo_path": "tex/chapters/paper_08.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 638, "size": 2450 }
\section{Requirements}

\subsection{Functional requirements}
\begin{enumerate}[label=A.\arabic*]
\item The system will be usable on a smartphone – In order to enable the user to use the provided service while moving around, the system has to be reachable from a mobile platform. The user shall also be able to conveniently use a camera to take pictures of the menu without having to type and search for one dish name at a time. \newline
\item The system should be usable on notebooks, tablets and stationary computers – The user shall be able to use the service at home or in similar environments to gather information about restaurants and dishes and to help with the selection of a place to eat. \newline
\item The system will run on Android, iOS and Windows 7/8/10 – The system shall run on the most popular operating systems in order to be accessible to as many users in the target audience as possible. \newline
\item The system will run within common third-party software – The user shall be provided with an intensively tested and user-friendly interface. In addition, the user should not have to download a separate application to use the functionalities of this system, but instead use this service as an extension to a familiar and popular application. \newline
\item The system shall provide contact information to enable the user to contact the support – The user shall be able to receive help in case of questions or problems with the service. In addition, the user should be able to report any bugs found while using the service or give feedback about it. \newline
\item The user shall be able to save information about a dish – The user should be able to retrieve information about dishes that were once searched for without having to run the search again. The user should also be able to read the information after closing and reopening the application. \newline
\item The system shall support the Korean alphabet as input \newline
\item The system will display information about dishes in the English language and alphabet \newline
\item The user shall be able to select a dish in the analyzed menu and receive detailed information for this specific dish – The system should provide information about the dishes in the form of used ingredients, possible allergies and level of spiciness \newline
\item The system shall accept pictures and text as input – The user should be able to manually search for a dish if the analysis of the picture returns no matches \newline
\item The system should distinguish between dish names and random letter clusters – The user may input pictures without a menu or pictures that include random text in addition to the menu. \newline
\item The system must be able to retrieve information from the database after removing the recognized price figure. Restaurant menus often display the dish name together with its price, and the Vision API may recognize the price value as part of the adjacent dish name. The system should therefore remove the trailing price figure before searching the database (a sketch of this step follows the Working mechanisms list below). \newline
\item The system shall be able to provide reliable information about the menu \newline
\begin{itemize}
\item The system will support Roman alphabet conversion in order to provide accurate information about Korean pronunciation. Namely, the system will provide the Romanization of a Korean dish name so that it can show users how to pronounce the Korean dish name in English.
This will provide a user-friendly interface by reducing the user’s difficulty in ordering food by its Korean name. \newline
\item The system will refer to Wikipedia in the process of registering accurate food information in the database. \newline
\item The system will be able to recognize and categorize ‘same meaning, differently expressed’ words as the same group, e.g.\ the two different spellings ‘자장면’ and ‘짜장면’ will be recognized as the same dish by the system when retrieving information from the database.
\end{itemize}
\end{enumerate}

\subsection{Working mechanisms}
\begin{enumerate}
\item Register service\\
The user searches for \emph{Before Order} on Facebook and enters the Facebook Page of the chatbot. This completes the registration process. \newline
\item Input information\\
The user takes a picture of a menu written in the Korean alphabet and sends it to the \emph{Before Order} chatbot. \newline
\item Recognition process\\
The \emph{Before Order} service analyzes the input picture or text and tries to recognize the dishes. \newline
\item Select information\\
If the user sent a whole menu, he or she can select a single dish out of the list that \emph{Before Order} provides and receive more detailed information for that specific dish. \newline
\item Retrieve selected food information\\
When the user selects a dish from the list, \emph{Before Order} retrieves the information about it from the database we built. \newline
\item Display detailed information\\
\emph{Before Order} provides the user with detailed information about the requested dish.
\end{enumerate}
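The following minimal sketch illustrates how the price-stripping and spelling-normalization requirements above could be implemented. The regular expression, the \texttt{CANONICAL} mapping and the function name are illustrative assumptions rather than part of the actual \emph{Before Order} code base or of the Vision API; in the real service, the normalized key would then be used to query the dish database.

\begin{verbatim}
import re

# Trailing price figures such as "6,000", "6000원" or "6000 won".
PRICE_PATTERN = re.compile(r"[\s:.]*[\d,]+\s*(원|won)?\s*$", re.IGNORECASE)

# Variant spellings that should resolve to one canonical dish entry.
CANONICAL = {
    "자장면": "짜장면",
}

def normalize_dish_name(ocr_line: str) -> str:
    """Strip a trailing price figure and map variant spellings to one key."""
    name = PRICE_PATTERN.sub("", ocr_line).strip()
    return CANONICAL.get(name, name)

assert normalize_dish_name("자장면 6,000원") == "짜장면"
assert normalize_dish_name("짜장면 6000") == "짜장면"
\end{verbatim}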
{ "alphanum_fraction": 0.802626343, "avg_line_length": 119.6666666667, "ext": "tex", "hexsha": "439eaa9557a94ccee1eff778a1af0346f5ab7cd1", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2019-04-28T16:07:11.000Z", "max_forks_repo_forks_event_min_datetime": "2019-04-28T16:07:11.000Z", "max_forks_repo_head_hexsha": "89e17705bb82817932185749f2513a8fafe6bf8d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Akkarin1212/before_order", "max_forks_repo_path": "doc/content/requirements.tex", "max_issues_count": 3, "max_issues_repo_head_hexsha": "89e17705bb82817932185749f2513a8fafe6bf8d", "max_issues_repo_issues_event_max_datetime": "2021-06-01T22:59:31.000Z", "max_issues_repo_issues_event_min_datetime": "2018-11-28T07:05:44.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Akkarin1212/before_order", "max_issues_repo_path": "doc/content/requirements.tex", "max_line_length": 365, "max_stars_count": null, "max_stars_repo_head_hexsha": "89e17705bb82817932185749f2513a8fafe6bf8d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Akkarin1212/before_order", "max_stars_repo_path": "doc/content/requirements.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1029, "size": 5026 }
%-------------------------
% Author: Mike Vanbuskirk
% License : MIT
%------------------------
\documentclass[]{mv_cv}
\setcounter{secnumdepth}{0}
\begin{document}

\header{Mike Vanbuskirk}
{DevOps Engineer | Grand Rapids, MI | linkedin.com/in/mikevanbuskirk}

\section{summary}
\begin{content}
{
Lead engineer specializing in DevOps, cloud-first architecture, and automation. I'm also a blogger and freelance technical content creator.
}
\end{content}

\section{education}
\begin{content}
{2009 - 2012 WGU | BSc - Networks Design \& Management}
\end{content}

\section{languages and technologies}
\begin{content}
{Python, Bash, Golang, Node.js, Terraform, AWS, Serverless, Git, Docker, Heroku, CI/CD}
\end{content}

%\section{technologies}
%\begin{content}
% {AWS, Git, Docker, Terraform, Chef, Puppet, Ansible, Consul, Nomad, Vagrant, Packer, TravisCI, Jenkins, New Relic, DataDog, Heroku}
%\end{content}

\section{experience}
\begin{explist}

\expitem
{May 2021 - Present}{Altitude Networks}{Lead Infrastructure Engineer}
{Leading infrastructure engineering, with a focus on serverless infrastructure deployed with Terraform.}

\expitem
{May 2020 - Present}{IOD}{Freelance Technical Writer}
{Freelance technical content creation, focusing on DevOps, Cloud, and Serverless technology.}

\expitem
{Feb 2019 - May 2021}{Salesforce}{Lead DevOps Engineer}
{Lead DevOps engineer, supporting Desk.com and the LiveMessaging platform. Focused on automating various aspects of infrastructure and operations, including disaster recovery.}

\expitem
{Dec 2017 - Feb 2019}{Apptio}{Senior DevOps Engineer}
{Senior DevOps Engineer on the Technical Operations team. Supported legacy, on-premise datacenter applications, as well as greenfield deployments in AWS using state-of-the-art tooling.}

\expitem
{Jul 2016 - Nov 2017}{The Walt Disney Company}{Senior Systems Engineer}
{Systems Engineer on the Enterprise Messaging and Foundational Services Team. Primary focus was designing and deploying DevOps infrastructure, as well as migration of critical application stacks to the cloud.}

\expitem
{Mar 2016 - Jul 2016}{Amazon}{Technical Operations Team Lead}
{Team Lead for the Seattle-based Technical Operations Support Team.}

\expitem
{Aug 2014 - Jul 2016}{Amazon}{Technical Operations Support Engineer}
{Operations Engineer supporting AWS and Amazon.com; provided monitoring and maintenance of the largest e-commerce and cloud platform in the world.}

\expitem
{Jun 2013 - Aug 2014}{Centurylink/Level(3)}{CDN NOC Linux Admin}
{System administration focused on maintenance, monitoring, and triage for a global CDN with multiple thousands of Linux nodes.}

\end{explist}

\thispagestyle{empty}
\end{document}
{ "alphanum_fraction": 0.7379679144, "avg_line_length": 41.8656716418, "ext": "tex", "hexsha": "758f725cc4a01eeda72ea385090a642b1afd9fa9", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-02-16T21:50:42.000Z", "max_forks_repo_forks_event_min_datetime": "2020-02-16T21:50:42.000Z", "max_forks_repo_head_hexsha": "29f9c7ef37edd3b688109fbcd389f7f2eaa275e3", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "mikevanb/mv-resume", "max_forks_repo_path": "mv_cv.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "29f9c7ef37edd3b688109fbcd389f7f2eaa275e3", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "mikevanb/mv-resume", "max_issues_repo_path": "mv_cv.tex", "max_line_length": 212, "max_stars_count": 1, "max_stars_repo_head_hexsha": "29f9c7ef37edd3b688109fbcd389f7f2eaa275e3", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "mikevanb/mv-resume", "max_stars_repo_path": "mv_cv.tex", "max_stars_repo_stars_event_max_datetime": "2020-06-03T02:23:24.000Z", "max_stars_repo_stars_event_min_datetime": "2020-06-03T02:23:24.000Z", "num_tokens": 708, "size": 2805 }
\subsubsection{\stid{6.01} LANL ATDM Programming Models and Runtimes}

\paragraph{Overview}

%\textit{Provide an overview of your project. You might find that the introductory text from your Fall 2017 Project Summary \url{https://confluence.exascaleproject.org/display/1ST/Fall+2017+ECP+ST+Project+Summaries} useful as a starting draft.}

The LANL ATDM PMR effort is focusing on the development and use of advanced programming models for Advanced Technology Development and Mitigation use-cases. Our current focus is on research and development of new programming model capabilities in the Legion data-centric programming system. Legion provides unique capabilities that align well with our focus on the development of tools and technologies that enable a separation of concerns between computational physicists and computer scientists. Within the ATDM PMR effort we have focused on the development of significant new capabilities within the Legion runtime that are specifically required to support LANL's ATDM applications. Another key component of our work is the co-design and integration of advanced programming model research and development within FleCSI, a Flexible Computational Science Infrastructure. A major benefit to the broader ECP community is the development of new features in the Legion programming system, which are available as free, open-source software at \url{https://gitlab.com/StanfordLegion/legion}.

\paragraph{Key Challenges} \leavevmode \\
%\textit{Describe what is hard to do, why it is challenging.}

\textbf{Legion.} Applications will face significant challenges in realizing sustained performance on next-generation systems. Increasing system complexity coupled with increasing scale will require significant changes to our current programming model approaches. This is of particular importance for large-scale multi-physics applications, where the application itself is often highly dynamic and can exhibit high variability in resource utilization and system bottlenecks depending on what physics are currently in use (or emphasized). Our goal in the LANL ATDM PMR project is to support these highly dynamic applications on Exascale systems, providing improvements in productivity, long-term maintainability, and performance portability of our next-generation applications.

\textbf{FleCSI Legion integration.} FleCSI is a Flexible Computational Science Infrastructure whose goal is to provide a common framework for application development for LANL's next-generation codes. FleCSI is required to support a variety of different distributed data structures and computation on these data structures, including structured and unstructured meshes as well as mesh-free methods. Our work in the LANL ATDM PMR project is focused on co-designing the FleCSI data and execution model with the Legion programming model to ensure the latest advancements in the programming model and runtimes research community are represented in our computational infrastructure. A significant challenge in our work is the additional constraint that FleCSI must also support other runtime systems such as MPI. Given this constraint, we have chosen an approach that ensures functional correctness across both runtimes but that also leverages and benefits from capabilities in Legion that are not directly supported in MPI (such as task-based parallelism as a first-class construct).
\paragraph{Solution Strategy} \leavevmode \\
%\textit{Describe your basic strategy for addressing the challenges.}

\textbf{Legion.} Through a funded collaboration, LANL and NVIDIA are developing new features in Legion to support our applications. Necessary features are identified through direct engagement with application developers and through rapid development, evaluation, and refactoring within the team. Major features include Dynamic Control Replication for improved scalability and productivity, and Dynamic Tracing to reduce runtime overheads for applications with semi-regular data dependencies, such as applications with stencil-based communication patterns.

\textbf{FleCSI Legion integration.} LANL staff work on co-design and integration of the Legion programming system into the FleCSI framework. We have regular milestones that align well with application needs and the development of new features within Legion.

\begin{figure}[htb]
	\centering
	\includegraphics[width=4in]{projects/2.3.6-NNSA/2.3.6.01-LANL-ATDM/control-replication-performance}
	\caption{\label{fig:control-replication-performance}\textbf{Productivity features such as Dynamic Control Replication scale well across multi-GPU systems in unstructured mesh computations.}}
\end{figure}

\begin{figure}[htb]
	\centering
	\includegraphics[width=4in]{projects/2.3.6-NNSA/2.3.6.01-LANL-ATDM/tracing-performance}
	\caption{\label{fig:tracing-performance}\textbf{New Legion features such as Tracing will improve strong scaling in unstructured mesh computations.}}
\end{figure}

\paragraph{Recent Progress} \leavevmode \\
%\textit{Describe what you have done recently. It would be good to have some kind of figure or diagram in this section.}

\textbf{Legion.} One of the strengths of Legion is that it executes asynchronous tasks as if they were executed in the sequence they occur in the program. This provides the programmer with a mental model of the computation that is easy to reason about. However, the top-level task in this tree-of-tasks model can often become a sequential bottleneck, as it is responsible for the initial distribution of many subtasks across large machines. In earlier work NVIDIA developed the initial implementation of control replication, which allows the programmer to write tasks with sequential semantics that can be transparently replicated many times, as directed by the Legion mapper interface, and run in a scalable manner across many nodes. Dynamic control replication is an important capability for LANL's ATDM effort, allowing our application teams to write applications with apparently sequential semantics while enabling scalability to Exascale architectures. This approach will improve the understandability of application code, productivity, and composability of software, and will ease the burden of optimization and porting to new architectures.

\textbf{FleCSI Legion Integration.} A key component of LANL's Advanced Technology Development and Mitigation effort is the development of a flexible computational science infrastructure (FleCSI) to support a breadth of application use cases for our Next Generation Code. FleCSI has been co-designed with the Legion programming system in order to enable our Next Generation Code to be performance portable and scalable to future Exascale systems.
Legion provides the underlying distributed and node-level runtime environment required for FleCSI to leverage task and data parallelism, data-dependent execution, and runtime analysis of task dependencies to expose parallelism that would be tedious and error-prone to expose at the application or middleware level. We completed an evaluation of the initial implementation of FleCSI on Legion using the FleCSALE hydrodynamics application.

\paragraph{Next Steps} \leavevmode \\

\textbf{Legion.} Focus on hardening and scalability of Legion's Dynamic Control Replication and development of Dynamic Tracing for application use-cases.

\textbf{FleCSI Legion Integration.} Demonstrate the integration of Dynamic Control Replication and other new Legion features within FleCSI. Our goal is to demonstrate a multi-scale application on the Advanced Technology System Sierra, using our latest advances in the Legion and FleCSI systems.
{ "alphanum_fraction": 0.815493867, "avg_line_length": 112.2463768116, "ext": "tex", "hexsha": "ff6d67b6f3bb32114b99463926df660f6a82ce86", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "e8e370db41c84998abe21edda07281718a02c55e", "max_forks_repo_licenses": [ "BSD-2-Clause" ], "max_forks_repo_name": "shintaro-iwasaki/ECP-ST-CAR-PUBLIC", "max_forks_repo_path": "projects/2.3.6-NNSA/2.3.6.01-LANL-ATDM/2.3.6.01-LANL-ATDM-PMR.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "e8e370db41c84998abe21edda07281718a02c55e", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-2-Clause" ], "max_issues_repo_name": "shintaro-iwasaki/ECP-ST-CAR-PUBLIC", "max_issues_repo_path": "projects/2.3.6-NNSA/2.3.6.01-LANL-ATDM/2.3.6.01-LANL-ATDM-PMR.tex", "max_line_length": 1041, "max_stars_count": null, "max_stars_repo_head_hexsha": "e8e370db41c84998abe21edda07281718a02c55e", "max_stars_repo_licenses": [ "BSD-2-Clause" ], "max_stars_repo_name": "shintaro-iwasaki/ECP-ST-CAR-PUBLIC", "max_stars_repo_path": "projects/2.3.6-NNSA/2.3.6.01-LANL-ATDM/2.3.6.01-LANL-ATDM-PMR.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1516, "size": 7745 }
% ========== Chapter on Response Timing

\chapter{Direct stimulation of somatosensory cortex results in slower reaction times compared to peripheral touch in humans}
\label{chap:responseTiming}

\section{Introduction}

While prior work suggests that the integration of somatosensory feedback into a BCI is possible and enhances performance relative to a task without somatosensory feedback, the comparison of human S1 direct cortical stimulation (DCS) to haptic stimulation has not been well explored. Specifically, given that S1 DCS completely circumvents ascending dorsal column pathways, how human subjects’ response times to DCS differ from response times to natural haptic stimulation has not been examined. This is an important consideration for effective BCI development aiming to integrate cortical stimulation as a method of sensory feedback, since response latency invariably constrains the feedback-loop architecture. We asked four subjects to press a button as soon as they perceived either a cutaneous haptic touch to the hand or a percept from S1 DCS via electrocorticographic (ECoG) grids covering the surface of the hand somatosensory cortex (see Fig. \ref{fig:RTschematic} for a general overview and Fig. \ref{fig:RTsubjProgression} for the subject-specific experimental progression). We initially hypothesized that direct cortical stimulation, by bypassing the ascending peripheral circuitry, would result in faster reaction times than peripheral haptic stimulation. We additionally hypothesized that subjects would become faster over multiple blocks of DCS as they learned to interpret the signal, and that subjects’ response times to DCS would decrease with longer, sustained train durations relative to shorter trains with a constant stimulation current amplitude.

Remarkably, all four subjects were significantly slower to respond to the S1 DCS than to haptic touch. Additionally, across our two blocks of testing we saw no significant differences between blocks for any trial type, suggesting that appreciable learning was not occurring on this short time scale. In three subjects we tested the train duration hypothesis and found that train lengths as short as 100 ms and up to 800 ms did not significantly affect the response times to the cortical stimulation. We performed off-target testing to serve as a control for the possibility that subjects were responding to stimulation applied anywhere in the cortex, rather than directly in somatosensory cortex. This confirms that we were comparing electrical activation of primary somatosensory cortex with the natural ascending peripheral pathways that are activated by touch and converge on S1. We also included null trials without any stimuli to control for subject suggestibility and response anticipation. Our results shed new light on human perceptual processing of S1 DCS and may direct future studies regarding the application and mechanisms of DCS for both basic neuroscience research and neural engineering applications.

\begin{figure}[ht]
\centering
\includegraphics[width=0.8\textwidth]{figures/responseTiming/RTfigure1}
\caption[Experimental protocol]{a) Here, we test the impact on behavioral performance of native cortical input (haptic touch) compared to artificial feedback (bipolar direct cortical stimulation of primary somatosensory cortex via ECoG electrodes). (b-c) Schematic overview of the experimental paradigm. b) DCS to S1 hand cortex results in a sensory percept over a specific, consistent location on the hand.
c) An experimenter uses a digital touch probe to provide haptic feedback to the same hand location. The subject then responds in both cases as soon as he or she feels sensation in the hand region, using a button held in the hand opposite to the perceived sensation.}
\label{fig:RTschematic}
\end{figure}

\begin{figure}[ht]
\centering
\includegraphics[width=0.8\textwidth]{figures/responseTiming/RTfigure2}
\caption[Experimental progression by subject]{Each column represents the experimental progression for one of our four subjects, from top to bottom. In all subjects, we localized electrodes which elicited a reliable percept on the hand upon stimulation. We then found a threshold level of stimulation where sensations were elicited, and used stimulation currents above this to ensure reliable perception with 200 ms trains. Subjects 2 and 4 both performed a two-alternative forced choice task of discriminating between one and two trains to confirm our test amplitudes were suprathreshold. Subject 2 then performed an intensity matching experiment in which we identified stimulation levels that elicited approximately the same strength of response as the haptic touch provided by the experimenter. All subjects completed experimental trials after we established the suprathreshold current to use. Subjects 2-4 all had two blocks consisting of 100, 200, 400, and 800 ms trains, interleaved with 20 off-target and 10 null trials, followed by 20 haptic stimulus trials interleaved with 10 null trials.}
\label{fig:RTsubjProgression}
\end{figure}

\section{Methods}

\subsection{General methods}

See \textit{Chapter \ref{chap:generalMethods}, \nameref{chap:generalMethods}}, for a description of the general methods applied to these subjects. Specific details for the response timing experiment are described below.

\subsection{Subjects}

Human subjects (n=4) were implanted at Harborview Medical Center (Seattle, WA) with electrocorticographic (ECoG) grids (2.3 mm exposed diameter, Ad-tech Medical, Racine, WI, USA) for acute clinical monitoring of intractable epilepsy prior to surgical resection. ECoG grid placement was determined solely based on clinical needs, without consideration of research benefits. We conducted all DCS studies after subjects were back on their anti-epileptic medications, after approximately one week of clinical monitoring. Individual patient demographics, including side of electrode implantation and subject handedness, can be found in Table \ref{table:RTperceptLocale}, with the corresponding cortical reconstructions and DCS electrode positions shown in Fig. \ref{fig:RTbySubj}. Epileptic foci are also identified in Table \ref{table:RTperceptLocale}, to illustrate that we expected neurotypical somatosensory cortical processing for our reaction time task. All patients gave informed consent under a protocol approved by the University of Washington Institutional Review Board. All research and methods were performed in accordance with the relevant guidelines and regulations.

\subsection{Stimulation waveform and hardware}

DCS trains consisted of 200 Hz biphasic pulses with 200 $\mu s$ per phase, since such DCS trains were previously found to elicit percepts during S1 stimulation \cite{Cronin2016a}.
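To make these timing parameters concrete, the short sketch below reconstructs the idealized envelope of such a train numerically: charge-balanced biphasic pulses with 200 $\mu s$ per phase delivered at 200 Hz for a chosen train duration. The sampling rate, the cathodic-first polarity, and the absence of an interphase gap are illustrative assumptions only; the actual waveform was produced by the stimulator hardware, and the pulse polarity depended on the electrode configuration described below.

\begin{verbatim}
import numpy as np

def biphasic_train(amp_ua, train_ms, rate_hz=200, phase_us=200, fs=100_000):
    """Idealized envelope of a biphasic DCS train (amplitude in microamps)."""
    n_samples = int(fs * train_ms / 1000)
    wave = np.zeros(n_samples)
    phase = int(fs * phase_us / 1e6)   # samples per phase (20 at 100 kHz)
    period = int(fs / rate_hz)         # samples per pulse  (500 at 100 kHz)
    for start in range(0, n_samples, period):
        wave[start:start + phase] = -amp_ua             # first (cathodic) phase
        wave[start + phase:start + 2 * phase] = amp_ua  # second (anodic) phase
    return wave

# For example, a 200 ms train at Subject 1's 2500 microamp test amplitude:
train = biphasic_train(amp_ua=2500, train_ms=200)
\end{verbatim}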
\subsection{Cortical stimulation}

Subjects’ perceptual thresholds for DCS were determined by incrementally increasing the current amplitude of a 200 ms DCS train in steps of 250 $ \mu A $ from a starting amplitude of 500 $ \mu A $ (Subjects 1 and 2), 1000 $ \mu A $ (Subject 3), or 200 $ \mu A $ (Subject 4) until the subject could perceive the stimulation, as indicated by verbal report (Fig. \ref{fig:RTsubjProgression}). In two subjects (Subjects 2 and 3), the first pair of DCS electrodes that we tried did not elicit a consistent perceptual experience, so we tried a different pair of electrodes and again found the perceptual threshold (Fig. \ref{fig:RTsubjProgression}). Due to experimental time constraints, we only comprehensively tested one pair of stimulation electrodes. During our screening tests we swept through different electrode pairs to choose the pair and stimulation polarity that most reliably produced recognizable percepts localized to the hand. Once we found this pair of electrodes for a given polarity, we conducted all remaining experiments for the day with that bipolar configuration to maximize the number of trials we were able to acquire.

We first determined subjects’ stimulation electrodes and perceptual current thresholds as described above, and then used a suprathreshold current amplitude during the experiment for all DCS conditions (Table \ref{table:RTperceptLocale}). To ascertain a suprathreshold stimulation current amplitude, we required two subjects (Subjects 2 and 4) to correctly identify, in ten sequential two-alternative forced choice (2AFC) trials, whether one or two 200 ms DCS trains with a suprathreshold current amplitude were delivered before proceeding from the perceptual thresholding to the response timing experiment (Fig. \ref{fig:RTsubjProgression}). This demonstrated that the subjects could reliably perceive the 200 ms DCS trains at that current amplitude. For the other subjects (Subjects 1 and 3), we achieved reliable discernment of stimulation with a suprathreshold amplitude (250-500 $ \mu A $ above their perceptual threshold) and proceeded with the response timing experiment without conducting the ten sequential 2AFC trials due to time limitations.

For Subject 2, after successfully completing the ten 2AFC trials, we attempted to match perceived intensity between the haptic feedback condition and the 200 ms DCS train condition by increasing the DCS current amplitude until the subject felt that the two stimuli were of qualitatively equal strength (Fig. \ref{fig:RTsubjProgression}). We did not attempt intensity matching in Subjects 1 or 4 due to time constraints and patient fatigue. In Subject 3, we did not attempt intensity matching because DCS elicited relatively weak percepts, and raising the current amplitude high enough to match its perceived intensity to that of the haptic stimuli would increase the risk of afterdischarges.

\subsection{Haptic stimulation}

We applied haptic feedback with digital touch probes (Karolinska Institute) that time-stamped the deflection, touching the cutaneous region where subjects localized the DCS percepts (Figs. \ref{fig:RTschematic}, \ref{fig:RTsubjProgression}). An audio signal, presented to the researcher via headphones but inaudible to the subject, cued the experimenter to apply the haptic feedback.
We used the digital touch probes previously \cite{Collins2016} in conjunction with cortical stimulation, and at the time of manufacturing they were calculated to have a touch onset with an average delay of 1.04 $\pm$ 0.48 ms (mean $\pm$ standard deviation). To account for experimenter variability and possible hardware changes over time, we measured them again and found them to have a touch onset with a delay of 5.24 $\pm$ 3.26 ms (mean $\pm$ standard deviation) and a median of 6.45 ms relative to an electrical short circuit (Supplemental information, Fig. \ref{fig:RTsupp1}). This small difference in registered touch onset, if added onto the digital touch probe latencies, does not change any of our significant effects (Supplemental information, Table \ref{table:RTsuppResponseTimes}).

\subsection{Experimental protocol}

After determining DCS current amplitudes, we completed one (for Subject 1) or two (for Subjects 2-4) blocks of response timing trials, each separated into a DCS set and a haptic stimulation set (Fig. \ref{fig:RTsubjProgression}). During the DCS set we delivered DCS train lengths of 200 ms for Subject 1, and train lengths of 100, 200, 400 and 800 ms in the subsequent three subjects (Subjects 2-4). Intertrial intervals of both DCS and haptic feedback were jittered (ranging from 2.5 to 3.5 seconds) to minimize anticipatory effects or rhythmic perception by the subjects. We broke up the DCS and haptic stimulation conditions into separate sets to allow subjects to anticipate and focus on one method of stimulation at a time. We reasoned that interleaving haptic and cortical stimulation within one block would result in a greater degree of uncertainty and error due to perceptual differences between modalities, rather than allowing a comparison between conditions where the subject was acclimated to either stimulation type. All subjects were instructed to respond as quickly as possible by pressing a button, held in the hand contralateral to the sensation, when they perceived the DCS or haptic sensation. The first subject was instructed not to look at the stimulated hand, while the subsequent three subjects (Subjects 2-4) were blindfolded to reduce potential confounds of visual distraction.

\subsection{Off-target control stimulation}

As a control, we also delivered off-target stimulation to a region outside of S1 during the DCS experimental set. This was to ensure that the responses were specific to DCS of S1, rather than a response to general, non-targeted DCS. For the off-target stimulation electrodes, we chose two electrodes that would be safe for bipolar stimulation based on prior clinical mapping and knowledge of the subjects’ epileptic foci. We used a 200 ms DCS train length and the same suprathreshold current amplitude for off-target stimulation as we used for S1 stimulation. As detailed below and in Fig. \ref{fig:RTsubjProgression}, Subject 1 completed a third set after the DCS and haptic sets with this off-target control stimulation. For Subjects 2-4, we interleaved off-target stimulation with the on-target, S1 stimulation during the DCS sets.

\subsection{Subject 1 trial progression}

In Subject 1, during the DCS set we delivered 86 trials of 200 ms trains of stimuli, with 17 trials of null stimuli (i.e., no stimulation, as a control) interleaved in a random order. In the haptic set, we delivered 103 trials of haptic touch, again with 17 interleaved null trials.
During the third and final set, we delivered 20 trials of off-target stimulation, interleaved with 6 null trials (Fig. \ref{fig:RTsubjProgression}).

\subsection{Subjects 2-4 trial progression}

For Subjects 2-4, we first delivered a DCS stimulation set based on stimuli timing and conditions from a pre-generated file that randomly interleaved 20 trials each of 100, 200, 400, and 800 ms train-length S1 DCS trials with 10 null trials and 20 off-target DCS trials, for a total of 80 S1 DCS trials and 30 control trials. Next, during the haptic set, we provided 20 trials of haptic stimulation through the digital touch probes, with 10 null control trials randomly interleaved. After a brief rest period (5-10 minutes), we proceeded to a second block of cortical and haptic stimulation sets (Fig. \ref{fig:RTsubjProgression}).

\subsection{Data analysis}

We performed all data post-processing and analysis in MATLAB and Python with custom scripts. To calculate the response times in the DCS conditions we took the temporal difference between the onset of the stimulation train and the subject’s button press, while for response times in the haptic feedback condition, we calculated the difference between the registered timing of the deflection of the digital touch probe and the subject’s button press. We excluded from further analysis, as outliers, trials with reaction times slower than 1 second or faster than 150 ms, as faster responses are unlikely for untrained human subjects \cite{Lele1954a} and slower ones more likely represented a decrease in attention to the task rather than a true response time. Additionally, we did not consider trials where either the button did not respond appropriately to the subject’s press or the digital touch probe did not register a deflection. Table \ref{table:RTsuppResponseTimes} reports how many trials were analyzed for each subject and condition.

Anderson-Darling tests for normality confirmed that the data were not consistently well described by a normal distribution; we therefore proceeded with non-parametric testing. We corrected for multiple comparisons by dividing an alpha value of 0.05 by the number of conditions tested within each subject. Specifically, both conditions for Subject 1 were not normally distributed (p = 2.725e-4 and 1.888e-8 for the haptic and 200 ms DCS conditions, respectively). For Subject 2 the 100 ms DCS, 800 ms DCS, and haptic conditions were not normally distributed (p = 9.631e-5, 0.0096, and 1.399e-16, respectively), while the 200 and 400 ms DCS conditions failed to reject the null hypothesis of being normally distributed (p = 0.046, 0.194, respectively). For Subject 3 the 800 ms DCS and the haptic conditions were not normally distributed (p = 0.006 and 3.502e-4, respectively), while the 200 and 400 ms DCS conditions failed to reject the null hypothesis of being normally distributed (p = 0.235 and 0.165, respectively). For Subject 4 the 800 ms DCS and haptic feedback conditions were not normally distributed (p = 0.006 and 1.186e-6, respectively), while the 200 and 400 ms DCS conditions failed to reject the null hypothesis of being normally distributed (p = 0.401 and 0.087, respectively).
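A minimal sketch of this response-time computation and outlier screening, followed by the kind of non-parametric comparisons described below, is shown here with synthetic stand-in data; the actual analysis scripts are available in the repository linked in the Data availability section.

\begin{verbatim}
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)

def reaction_times(stim_onsets, button_presses, lo=0.150, hi=1.000):
    """Button press minus stimulus onset (s), dropping trials outside 150-1000 ms."""
    rts = np.asarray(button_presses) - np.asarray(stim_onsets)
    return rts[(rts >= lo) & (rts <= hi)]

# Synthetic stand-in timestamps (seconds); real ones come from the recordings.
onsets = np.arange(40) * 3.0
haptic  = reaction_times(onsets, onsets + rng.normal(0.20, 0.04, 40))
dcs_200 = reaction_times(onsets, onsets + rng.normal(0.45, 0.08, 40))
dcs_400 = reaction_times(onsets, onsets + rng.normal(0.46, 0.08, 40))

print(stats.anderson(haptic))                    # normality check per condition
print(stats.ranksums(haptic, dcs_200))           # haptic vs. one DCS train length
print(stats.kruskal(haptic, dcs_200, dcs_400))   # omnibus test across conditions
\end{verbatim}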
Due to the presence of non-normally distributed groups, we proceeded with non-parametric testing for all subjects, using the non-parametric Wilcoxon Rank Sum and Kruskal-Wallis tests (with Dunn-Sidák corrections for post-hoc comparisons of mean ranks \cite{Dinno2015,Sidak1967}) to assess differences between conditions with an alpha significance level of 0.05. To assess blockwise differences, we used Rank Sum tests with Bonferroni corrections and a base alpha critical level of 0.05. Further, we tested for equal variances between groups using the Brown-Forsythe test \cite{Brown1974}. For Subjects 2 and 4, testing revealed no significant differences in variances between groups, whereas for Subjects 1 and 3, there were significant differences in variances (critical value of 0.05; not significant -- Subject 2: p = 0.094, Subject 4: p = 0.0873; significant -- Subject 1: p = 0.0113, Subject 3: p = 5.662e-4). Thus, for Subjects 2 and 4, statistically significant differences between conditions from the Kruskal-Wallis and post-hoc tests were interpreted as differences in medians, with haptic stimulation being significantly faster than cortical stimulation, while for Subjects 1 and 3, statistically significant differences were interpreted as differences in stochastic dominance of one sample over another \cite{Dinno2015}.

\subsection{Data availability}

Data required to recreate the above analyses are in the following repository.

\url{https://github.com/davidjuliancaldwell/responseTimingPaper.git}

\subsection{Code availability}

Code required to recreate the above analyses is in the following repository.

\url{https://github.com/davidjuliancaldwell/responseTimingPaper.git}

MATLAB and Python are required to generate the full set of figures and analyses.

\section{Results}

\subsection{Response times}

In Subject 1, we compared haptic stimulation to 200 ms trains of S1 DCS with a suprathreshold current amplitude. Haptic feedback elicited a significantly different reaction time compared to the 200 ms DCS trains (p = 6.105e-16, Fig. \ref{fig:RTbySubj}). The median response time for the S1 DCS trains was 459 ms, while the median response time for the haptic feedback condition was 313 ms (Table \ref{table:responseTimes}), consistent with classic tactile reaction times \cite{Lele1954a,Woodworth1954a}. Minimum, 25\% and 75\% quartiles, and maximum response times for all subjects are reported in Table \ref{table:responseTimes}. This subject did not perceive off-target DCS, and responded to a single null stimulation trial.

In light of the results from Subject 1, we subsequently chose to consider possible effects of S1 DCS train length on reaction times, acquiring and comparing haptic responses to train lengths of 100, 200, 400 and 800 ms with suprathreshold currents in Subjects 2-4. In addition to testing four DCS train lengths for Subjects 2-4, we inserted a rest period between the two blocks to test for habituation or adaptation (Fig. \ref{fig:RTbyBlock}). There were no significant differences between blocks for Subjects 2-4, so we combined them for further statistical analyses. Specifically, for Subject 2, there were no significant blockwise differences between the conditions (p = 0.811, p = 0.715, p = 0.675, and p = 0.0962 for the 100, 200, 400, and 800 ms DCS train conditions, respectively; p = 0.579 for the haptic condition, critical threshold of p = 0.01). For Subject 3, we excluded the 100 ms condition from statistical analyses due to only a single response within one block.
Blockwise differences were not significant for any of the other conditions for Subject 3 (p = 0.064, p = 0.087, and p = 0.155 for the 200, 400, and 800 ms DCS train conditions, respectively; p = 0.519 for the haptic condition, critical threshold of p = 0.0125). Similarly, for Subject 4, we excluded the 100 ms condition because of a single response in one block and two responses in another block. Again, blockwise differences were not significant for any of the other conditions for Subject 4 (p = 0.035, p = 0.669, and p = 0.109 for the 200, 400, and 800 ms DCS train conditions, respectively; p = 0.316 for the haptic condition, critical threshold of p = 0.0125).

\renewcommand{\tabcolsep}{1pt}
\renewcommand{\arraystretch}{0.7}
\begin{table}[ht]
\scriptsize
\begin{tabularx}{\textwidth}{@{}lXXXXXXX@{}}
\toprule
Subject & Experimental \newline Condition & Minimum \newline (ms) & 25\% quartile \newline (ms) & Median \newline (ms) & 75\% quartile \newline (ms) & Maximum \newline (ms) & Number of trials \newline responded to within \newline response time bounds \\
\midrule
1 & 200 ms & 348 & 422 & 459 & 495 & 821 & 81/86 \\
 & touch & 169 & 254 & 313 & 374 & 719 & 73/103 \\
 & null & & & 724 & & & 1/40 \\
 & off-target & & & & & & 0/20 \\
\midrule
2 & 100 ms & 182 & 232 & 277 & 314 & 551 & 36/40 \\
 & 200 ms & 188 & 235 & 254 & 276 & 372 & 40/40 \\
 & 400 ms & 169 & 244 & 261 & 288 & 38 & 40/40 \\
 & 800 ms & 180 & 234 & 265 & 291 & 488 & 40/40 \\
 & touch & 151 & 189 & 198 & 228 & 726 & 38/40 \\
 & null & & & 449 & & & 1/40 \\
 & off-target & & & & & & 0/40 \\
\midrule
3 & 100 ms & N/A & N/A & 514 & N/A & N/A & 1/40 \\
 & 200 ms & 403 & 409 & 442 & 494 & 553 & 9/40 \\
 & 400 ms & 383 & 455 & 515 & 603 & 747 & 26/40 \\
 & 800 ms & 348 & 466 & 528 & 806 & 994 & 31/40 \\
 & touch & 151 & 169 & 222 & 318 & 507 & 30/40 \\
 & null & & & & & & 0/40 \\
 & off-target & & & 484 & & & 1/40 \\
\midrule
4 & 100 ms & 218 & 219 & 220 & 503 & 786 & 3/40 \\
 & 200 ms & 213 & 347 & 408 & 595 & 754 & 13/40 \\
 & 400 ms & 305 & 371 & 423 & 588 & 857 & 17/40 \\
 & 800 ms & 240 & 334 & 400 & 624 & 882 & 22/40 \\
 & touch & 153 & 178 & 201 & 234 & 556 & 19/40 \\
 & null & & & & & & 0/40 \\
 & off-target & & & & & & 0/40 \\
\bottomrule
\end{tabularx}
\caption[Reaction times for each subject and each condition]{In all subjects, cortical stimulation resulted in significantly different reaction times than haptic stimulation (assessed through non-parametric Wilcoxon Rank Sum and Kruskal-Wallis tests). The final column reports the number of trials responded to by each subject across both blocks for each of the trial types, given our response time limits of 150-1000 ms and appropriate signal detection. Response times outside of this range were considered outliers based on expected human performance (see Methods, Data analysis for details). Blank boxes indicate trial types with no responses.}
\label{table:responseTimes}
\end{table}

For Subject 2, all S1 DCS response times were found to be significantly different from the haptic response times due to statistical differences in medians (p = 3.654e-8 for the 100 ms, p = 7.000e-5 for the 200 ms, p = 2.064e-6 for the 400 ms, and p = 1.866e-6 for the 800 ms DCS train; adjusted p-value threshold = 0.05), while no S1 DCS conditions differed significantly from each other (Fig. \ref{fig:RTbySubj}).
The median response times for the 100, 200, 400, and 800 ms DCS trains were 277, 254, 261, and 265 ms, respectively, while the median response time for the haptic feedback condition was 198 ms (Table \ref{table:responseTimes}). For this subject we chose off-target stimulation electrodes that had been safely tested during clinical language mapping, but used much lower current amplitudes than tested clinically (Fig. \ref{fig:RTbySubj}, Subject 2, off-target electrodes). The subject perceived the off-target stimulation as a vague, non-tactile, and non-localized sensation, and described it as distinct from the S1 DCS sensation. Although he could perceive the off-target DCS, he was able to volitionally choose not to respond to these trial types and did not respond to any of the off-target stimuli within our 150-1000 ms response time window. The subject responded within our time window to a single null stimulus.

\begin{figure}[h]
\centering
\includegraphics[width=1\textwidth]{figures/responseTiming/RTfigure3}
\caption[Comparison of reaction times for four subjects and their DCS electrodes.]{Each dot represents a response time for a given trial, colored by condition. Pink indicates the haptic test condition, while turquoise indicates S1 DCS conditions and electrodes over hand sensory cortex. Subject 1 only received the 200 ms DCS and haptic stimulation conditions, while Subjects 2, 3, and 4 had 100, 200, 400, and 800 ms trains of stimulation applied. The two separate blocks for Subjects 2, 3, and 4 were pooled together for each subject. Off-target DCS control electrodes are indicated in yellow. Electrode locations are based on cortical surface reconstructions for each subject as described in the Methods. Electrodes with a plus symbol (+) indicate anodal-first stimulation, while electrodes with a minus symbol (-) indicate cathodal-first stimulation.}
\label{fig:RTbySubj}
\end{figure}

\begin{figure}[h]
\centering
\includegraphics[width=1\textwidth]{figures/responseTiming/RTfigure4}
\caption[Comparison of the two blocked sessions for three subjects.]{Each dot represents a response time for a given trial, colored by block. Of note is the non-normality of some of the response timings for different conditions. Additionally, the paucity of responses for Subject 3 to the 100 ms and 200 ms conditions, and for Subject 4 to the 100 ms condition, suggests the stimulation level was at or near their perceptual thresholds.}
\label{fig:RTbyBlock}
\end{figure}

For Subject 3, the 200, 400, and 800 ms DCS response times were found to be significantly different from the haptic feedback response times, due to haptic feedback stochastically dominating the reaction times (p = 0.029, p = 5.971e-8, p = 1.290e-10, respectively), while no S1 DCS conditions differed significantly from each other. Subject 3 responded on only one trial with 100 ms S1 DCS trains, with a response time of 514 ms, so we excluded statistical comparisons with the other conditions. Median S1 DCS response times were 442, 515, and 528 ms for the 200, 400, and 800 ms DCS trains, respectively, while the median haptic feedback response time was 222 ms (Fig. \ref{fig:RTbySubj}, Table \ref{table:responseTimes}). This large difference in medians provides convincing evidence that the cortical stimulation resulted in significantly slower reactions than haptic stimulation. This subject responded within our 150-1000 ms response window once to off-target stimulation, although they did not report being able to perceive the off-target stimulation.
The subject did not respond to the null condition.

For Subject 4, the 200, 400, and 800 ms S1 DCS response times were found to be significantly different from the haptic response times due to a significant difference in medians (p = 1.161e-3, p = 8.803e-5, p = 1.107e-4, respectively), while no S1 DCS conditions differed significantly from each other. Subject 4 responded on only three trials with 100 ms S1 DCS trains, with a median reaction time of 220 ms, so we excluded the 100 ms DCS condition from further statistical analysis. Median DCS response times were 408, 423, and 400 ms for the 200, 400, and 800 ms DCS trains, respectively, while the median haptic response time was 201 ms (Fig. \ref{fig:RTbySubj}, Table \ref{table:responseTimes}). This subject did not perceive the off-target stimulation or the null stimulation.

For Subjects 1--3, there was no indication of adaptation, nor any report of the stimulus intensity weakening or changing throughout the DCS sets. After the first block with Subject 4, however, he verbally described a noticeable decrease in stimulation intensity as the trials proceeded. Therefore, on the subsequent block, we increased the DCS current amplitude from 1.0 mA to 1.2 mA. The subject again verbally described a decrease in perceived intensity as the trials proceeded during the second block, despite the increased current amplitude. This suggests individual differences in adaptation to cortical stimulation, perhaps dependent on parameters such as the electrode location, medication status, subject attentiveness, or amount of cerebrospinal fluid underneath the electrodes.

\subsection{Qualitative assessment}

The subjects described the S1 DCS as non-painful, using descriptions such as a “pins and needles” like sensation (Subject 1), a “buzz”, or the feeling of “something brushing” against the skin (Subject 2), “tingling” (Subject 3), and “pulse” or “throb” (Subject 4). These subjective descriptions are in line with previous reports for S1 DCS\textsuperscript{10,11,18}. The subjects reliably localized the percept from S1 DCS during the experiment and across blocks (see Table \ref{table:RTperceptLocale} for percept localization). However, the percepts from the pair of electrodes initially chosen for Subject 2 were not reliably localized, with the subject localizing some stimuli to the proximal thumb and some to the proximal palmar area of the fifth finger. Therefore, prior to any experimentation, we selected a different pair of electrodes for Subject 2 that generated a percept which the subject reliably localized to the third finger.

\renewcommand{\tabcolsep}{2pt}
\renewcommand{\arraystretch}{0.3}
\begin{table}[ht]
\tiny
\begin{tabularx}{\textwidth}{@{}lllXXXlX@{}}
% \begin{tabularx}{100pt}{@{}lllXllXX@{}}
\toprule
Subject & Gender & Age & Experiments & Stimulation \newline Current & Coverage and DCS \newline percept localization & Handedness & Seizure \newline Etiology \\
\midrule
1 & Female & 21 & Cortical Stimulation \newline Digital touch probe \newline Off Target & 2500 $ \mu A $ & Right grid \newline Distal phalange of digit 2 & Right & Complex partial epilepsy with multifocal ictal onset and at least 2 distinct epileptogenic areas with seizures arising from right frontal and right temporal regions. No resection / no pathology, VNS implant.
\\ 2 & Male & 37 & \textbf{Block 1:} Cortical/Off Target interleaved \newline Digital touch probe \newline \newline \textbf{Block 2:} Cortical/Off Target interleaved \newline Digital touch probe & 1500 $ \mu A $ & Left grid \newline All of digit 3 & Right & Focal epilepsy isolated to a left parietal calcified lesion (widespread calcifications eliciting diffuse and severe reactive changes including astrogliosis and microgliosis with unknown origin). Seizures originating from left lateral parietal cortical lesion. \\ 3 & Male & 26 & \textbf{Block 1:} Cortical/Off Target interleaved \newline Digital touch probe \newline \newline \textbf{Block 2:} Cortical/Off Target interleaved \newline Digital touch probe & 2000 $ \mu A $ & Right grid \newline Distal phalanges of digits 3-5 & Right & Focal epilepsy isolated to a left parietal calcified lesion (widespread calcifications eliciting diffuse and severe reactive changes including astrogliosis and microgliosis with unknown origin). Seizures originating from left lateral parietal cortical lesion. \\ 4 & Male & 34 & \textbf{Block 1:} Cortical/Off Target interleaved \newline Digital touch probe \newline \newline \textbf{Block 2:} Cortical/Off Target interleaved \newline Digital touch probe & 1000 $\mu A$ \newline \newline \newline \newline 1200 $ \mu A $ & Right grid \newline Palmar area near base of digit 1 & Right & MRI negative, partial seizures originating from the left mesial temporal area including the anterior temporal pole and hippocampus. Pathology included mild gliosis with leptomeningeal and subpial reactive changes. \\ \bottomrule \end{tabularx} \caption[Subject Demographics. ]{This table shows the demographics for all the patients in this study, including experiments completed, stimulation currents used, and the localization of subjects’ percepts, electrode locations, and seizure etiology.} \label{table:RTperceptLocale} \end{table} For Subject 2 we attempted to match the perceived intensity of the 200 ms DCS train to that of the haptic stimulation (see Methods, Fig. \ref{fig:RTsubjProgression}), and although we were able to make their intensities more similar to one another, we were not able to match them completely. As we increased the DCS current amplitude, Subject 2 felt that the percept he experienced both increased in intensity and in the size of the localized area. As a result, during the experiment his perceived intensity of the S1 DCS was slightly less than the perceived intensity of the haptic stimulation in order to keep the localized areas of the sensation similar. Despite matching the sensation intensities as well as possible, Subject 2 described the haptic and cortical stimulation as very distinct from one another. The S1 DCS percept was initially localized to the same region as the haptic stimulation (dorsal side of third finger), but then radiated across the skin. \section{Discussion} Our study characterized reaction time differences between cortical and haptic stimulation in four human subjects. Our results demonstrate that response times to cortical stimulation are significantly slower than to haptic stimulation. We additionally demonstrate that cortical stimulation trains of varying lengths do not significantly affect the reaction times for suprathreshold cortical stimulation parameters. 
Our results are consistent with a previous observation in non-human primates that intracortical microstimulation of area 1 in primary somatosensory cortex results in significantly slower response times than peripheral stimulation \cite{Godlove2014a}. This delayed response for DCS is counterintuitive at first, as one may suspect that bypassing the ascending peripheral afferents through DCS would reduce the distance traversed by the sensory volley and consequently result in faster reaction times. However, as previously suggested \cite{Godlove2014a}, electrical stimulation may be exciting both inhibitory and excitatory connections in unnatural combinations, driving slower behavioral responses. In human neocortex, approximately 20\% of neurons are interneurons, many of which are inhibitory and contribute to local inhibitory neural circuits \cite{Arber2013}. Similarly, in rodent neocortex, approximately 20-30\% of neurons are interneurons \cite{Markram2004a}. This is important when considering the neural response to electrical stimulation, as microstimulation in rodents has been demonstrated to result in a spatiotemporal smear of activity, due to the evoked activity consisting of a combination of fast excitatory responses and inhibitory responses \cite{Butovas2003a}. In addition to an unnatural spatial cortical activation, electrical microstimulation in rodents yields different trends in trial-to-trial variability relative to natural sensory stimuli \cite{Millard2015a}. Thalamocortical simulations suggest that high levels of synchrony generated by electrical stimuli, which are not seen in natural stimuli, are responsible for this difference in the shape of the trial-to-trial variability curves \cite{Millard2015a}. Additionally, electrical microstimulation, as used in the intracortical microstimulation experiments, activates neurons primarily through their axons \cite{Ranck1975,Tehovnik2006a}, although other regions of the cell such as the cell body and dendrites may also be activated depending on stimulus polarity and orientation. Non-human primate work using microstimulation combined with fMRI has shown that electrical stimulation may disrupt cortico-cortical signal propagation by silencing output of areas where the afferents are electrically stimulated \cite{Logothetis2010}. This supports the idea that electrical stimulation results in a distinctly different activation pattern, which may explain a less optimal (and longer response time) reaction to electrical stimuli compared to natural haptic stimulation. Other hypotheses for the delayed response to S1 DCS include the possible need for downstream amplification, from a region such as the thalamus, that is initially skipped via S1 stimulation \cite{Godlove2014a}, or the possibility that surface stimulation is unable to directly stimulate deeper primary somatosensory areas, including area 3b where direct intracortical microstimulation has been shown to elicit similar reaction times to haptic stimulation during a discrimination task in non-human primates \cite{Romo2000a}. Recent work in computational modeling regarding subdural cortical stimulation in humans suggests that bipolar stimulation at our current levels is unlikely to activate pyramidal neurons directly in the deeper areas of the sulci, and rather, the primary activation of neurons occurs in Brodmann area (BA) 1 on the surface of the cortex \cite{Seo2015,Seo2016}, and possibly the superficial aspects (towards the crown of the gyrus) of BA3b. 
Area 3b, where the majority of thalamocortical connections are thought to project, is likely sparsely activated, while area 3a lies in the deepest part of the sulcus \cite{Geyer1999} and is activated even less. Therefore, our inability to effectively target BA3 may partly explain the delayed reaction times to DCS relative to natural haptic touch.

Early cortical stimulation work in elderly dyskinetic patients \cite{LIBET1964} suggested a 500 ms stimulation train was required for consistent perception of DCS with a liminal, or near-threshold, current amplitude. Later work in epileptic patients demonstrated that a 250 ms stimulation train could elicit conscious perception with near-threshold current amplitudes \cite{Ray1999a}. Furthermore, Ray et al. illustrated the inverse relationship between DCS train duration and the current amplitude required for perception, with current thresholds increasing as the train durations decreased \cite{Ray1999a}. We observed a similar phenomenon in Subjects 3 and 4, where for a fixed current, shorter train lengths did not elicit conscious percepts. These two subjects’ inability to reliably respond to the 100 ms train duration condition suggests that we may have been using a stimulation current amplitude that was too low to reliably discern train lengths under 200 ms (the train length used for perceptual thresholding) at a fixed amplitude. Additionally, Subject 3 perceived fewer of the 200 ms DCS trials than the 400 ms or 800 ms DCS trials, suggesting that we were stimulating close to the threshold train duration and intensity parameters. In contrast to Subjects 3 and 4, Subject 2 reliably discerned all of the stimulation trains and had much faster reaction times. In this case we seemed to be operating far above the minimum current threshold necessary for the various DCS train lengths tested. As Subject 2 was the only subject for whom we attempted DCS/haptic stimuli intensity matching (see Methods), we used a current amplitude that was notably greater than the subject’s perceptual threshold (roughly 750 $\mu A$ greater). The other subjects completed the task with current amplitudes that were only roughly 250-500 $\mu A$ above their perceptual thresholds. Stronger intensity stimuli are known to produce faster response times \cite{Woodworth1954a}, and it is possible that, to a degree, higher suprathreshold DCS currents may lead to faster response times, but further experimentation is necessary to examine this hypothesis.

Mean human tactile reaction times from one study of untrained, healthy volunteers have been found to vary between 210 and 400 ms \cite{Lele1954a}, but can range down to 140-150 ms with practice for certain individuals \cite{Woodworth1954a}. Reaction times for individuals tend to stay relatively constant between ages 25 and 60 \cite{Woodworth1954a}. As our patients’ ages (21, 37, 26, 34) fall within or close to this range, we expect little influence of age on the reaction times. With this as a basis for normal comparison for our untrained subjects, we similarly find a range of different response times to cortical and haptic stimulation, speaking to individual variability. This suggests that for future BCI implementation, an individual’s innate response time may need to be considered in light of variable latencies.
That is, if one subject requires on average 500 ms to respond to cortical stimulation while another subject requires 300 ms, the BCI design must account for these time differences in the feedback loop. Response times are also modulated by non-somatosensory features such as visual feedback, arousal, motivation, and attention \cite{Woodworth1954a}. In well-practiced healthy subjects, response times based purely on visual feedback are slower than those based on tactile stimuli for a simple reaction time task (approximately 180 ms on average compared to 140 ms) \cite{Woodworth1954a}. The combination of haptic and visual feedback has been shown to result in faster reaction times relative to visual feedback alone for computer-based tasks in healthy human subjects \cite{Vitense2003a}. We controlled for potential effects of visual feedback by having Subjects 2-4 wear a blindfold, and asking Subject 1 to close her eyes. Subjects’ attention may have also affected their response times, but we did not attempt to quantify their attentiveness. Experimenter observation suggests that Subject 2, who had the fastest response times, was the most engaged in the task and approached it with a competitive, game-like attitude. However, we cannot ascertain that Subject 2’s attentiveness affected his response times, and we have presented other possible explanations for his faster responses, including the use of a higher suprathreshold stimulation amplitude than those used for the other three subjects. Similarly, experimenter observation suggests that Subject 1 was the least alert and least engaged in the task, consistent with her having the slowest haptic reaction times. Future studies may consider including a comparison of response times to S1 DCS and haptic stimuli with visual feedback (i.e., eyes open without a blindfold, as would be likely in a future application) to understand how visual feedback may modulate response times. As we increase task complexity and move away from a simple reaction time task as performed here, the benefits from additional feedback beyond only visual feedback may become even more apparent.

An additional factor to be explored in the future is the impact of the polarity of the bipolar stimulation used. Due to experimental time constraints we were unable to comprehensively test the effect of anodic-first relative to cathodic-first stimulation at each electrode, but because the polarity of stimulation changes the pattern of cortical activation, there could be an effect on reaction times and perception \cite{Tehovnik2009,Yazdan-Shahmorad2011}. Each of our blocks lasted on the order of 10 minutes, with 5-10 minutes of rest between the blocks. The lack of a consistent, discernible habituation or learning effect suggests that either the sessions were not long enough or frequent enough to elicit learning or habituation, or that subjects were already reacting close to their fastest possible reaction times. We do not claim that repeated training over multiple sessions and days would not show a decrease in reaction time, but rather that we are unable to address this particular question with our acute ECoG epilepsy experiments.

In Subjects 1, 3, and 4, the frontal and temporal electrodes used for the off-target stimulation elicited no sensation and were only responded to once by Subject 3. However, in Subject 2, whose off-target stimulation site was over a language area, the subject perceived a vague, non-localizable sensation of the stimulation.
These off-target electrodes had been safely tested during clinical mapping and avoided possible seizure foci. We used current amplitudes much lower than those tested clinically to further avoid afterdischarges and match the suprathreshold stimulation used in the other S1 DCS conditions. Subject 2 described the off-target DCS as distinct from the S1 stimulation conditions, and had no difficulties in responding only to S1 stimulation. This suggests that humans can receive stimulation in multiple cortical regions and distinguish them within short temporal intervals.

An unknown factor in the work presented here is the extent to which DCS of S1 is also impacting ipsilateral M1, and through connections to contralateral M1, motor output. Our subjects are able to perform motor tasks with the hand being stimulated concurrently, suggesting that there is not grossly visible motor disruption on the ipsilateral or contralateral side. Our subjects are also able to perceive temporally overlapping natural haptic stimulation and DCS at the same spatial location, suggesting that there is not global inhibition or cortical jamming. However, we do acknowledge that some of the delay observed could indeed be due to some potential motor disruption from charge spread. This study does not serve to address this, but rather presents data revealing significant delays in the timed response to S1 DCS with respect to natural touch. This effect may possibly be due to a delay in conscious perception of the DCS or in the motor output pathway, which has implications for neuroprosthetic and closed-loop BCI design.

\subsection{Outlook}
Our results, while elucidating aspects of human perceptual processing of S1 DCS, demonstrate a need for further exploration of the neural mechanisms underlying the reaction time differences between S1 DCS and haptic stimulation. We found, in four human subjects, that response times to cortical stimulation are significantly different from those to haptic stimulation. The fact that there appears to be a significant delay in cortical processing and subsequent response after DCS does not preclude ECoG stimulation from being a promising modality for feedback in a neuroprosthetic application. Rather, this highlights the importance of understanding variables such as human reaction time for neuroprosthetic applications and appropriately designing devices to account for these temporal delays. Our ongoing studies are aimed at understanding and potentially speeding up the temporal response to ECoG stimulation by varying stimulation parameters, regions targeted, and waveform shape.
\section{Supplemental Information}
\subsection{Digital touch probe latency}
In order to assess reaction times to natural haptic touch via mechanical means, we measured the latency between contact of the digital touch probe with the surface and the digitally registered contact. Previous literature \cite{Collins2016} and corresponding work within our group demonstrated the digital touch probes to have an average onset latency of 1.04 ms $\pm$ 0.48 ms standard deviation. To account for experimenter variability and possible hardware sensitivity changes over time, we characterized the onset latency of the digital touch probes by comparing the registered digital touch probe output with that of an electrical short circuit triggered by digital touch probe contact with a surface, across 294 touches (Fig. \ref{fig:RTsupp1}). Each trial was registered on the Tucker Davis Technologies RZ5D analog inputs via the front panel, and the two triggered onsets were subtracted from one another to calculate an onset latency. We calculated a mean onset latency of 5.83 ms, a standard deviation of 3.26 ms, and a median onset latency of 5.24 ms (Fig. \ref{fig:RTsupp2}). Based on these measurements, we acknowledge that our response timing results are minimally shifted towards a faster reaction time for natural, haptic touch by an average of approximately 6 ms. Adjusting the current findings for these delays (shifting the measured haptic reaction times by the mean onset latency) does not significantly impact our results or interpretations. To illustrate this specifically, we recalculated the statistics after the adjustments. The resulting p-values, shown in Table \ref{table:RTsuppResponseTimes}, are nearly identical to the originally estimated p-values and thus do not change our conclusions.
\begin{figure}[h]
\centering
\includegraphics[width=0.7\textwidth]{figures/responseTiming/RTsupp1}
\caption[Example electrical short circuit and digital touch probe signal traces. ]{Time course of the pull-up electrical short circuit relative to the digital touch probe onset. Each touch probe onset was compared to the electrical short circuit that preceded it, as indicated by the horizontal black bars, and these onset latencies were used to generate the distribution of latencies in Fig. \ref{fig:RTsupp2}.}
\label{fig:RTsupp1}
\end{figure}
\begin{figure}[h]
\centering
\includegraphics[width=0.7\textwidth]{figures/responseTiming/RTsupp2}
\caption[Distribution of onset delays.]{Distribution of latencies between an electrical short circuit and the registered application of contact onset using the digital touch probe. Not visualized or included in the calculations are 3 outliers at 63, 68, and 69 ms, due to light or inconsistent experimenter force application.}
\label{fig:RTsupp2}
\end{figure}
\renewcommand{\tabcolsep}{1pt}
\renewcommand{\arraystretch}{0.7}
\begin{table}[ht]
\scriptsize
\begin{tabularx}{\textwidth}{@{}XXX@{}}
\toprule
Subject & Experimental Condition \newline Digital Touch Probe vs.
& p-value \\
\midrule
1 & 200 ms & 2.101e-15 \\
\midrule
2 & 100 ms & 1.834e-7 \\
 & 200 ms & 2.900e-4 \\
 & 400 ms & 9.369e-6 \\
 & 800 ms & 9.046e-6 \\
\midrule
3 & 200 ms & 0.033 \\
 & 400 ms & 7.019e-8 \\
 & 800 ms & 1.609e-10 \\
\midrule
4 & 200 ms & 2.225e-3 \\
 & 400 ms & 1.125e-4 \\
 & 800 ms & 1.422e-4 \\
\bottomrule
\end{tabularx}
\caption[Adjusted statistics for haptic touch compared to DCS.]{Adjusted p-values, obtained by adding the mean digital touch probe latency to the haptic response times and then comparing them to the DCS conditions.}
\label{table:RTsuppResponseTimes}
\end{table}
\section{Related Publications and Presentations}
\subsection{Publications}
\noindent Caldwell DJ*, Cronin JA*, Wu J, Weaver K, Ko AL, Rao RPN, Ojemann JG, “Direct stimulation of somatosensory cortex results in slower reaction times compared to peripheral touch in humans”, Scientific Reports. *These authors contributed equally
\medskip
\subsection{Presentations}
\noindent Caldwell DJ, Cronin JA, Wu J, Kutz JN, Brunton BW, Weaver K, Rao RPN, Ojemann JG, “Spectrotemporal analysis of direct cortical stimulation compared to haptic stimulation in a response timing task in humans”, Society for Neuroscience – Annual Meeting, Washington DC, District of Columbia, November 2017
\medskip
\noindent Caldwell DJ, Cronin JA, Wu J, Weaver K, Rao RPN, Ojemann JG, “Direct Cortical Stimulation Results in Slower Reaction Times Compared to Peripheral Touch in Humans”, OHBM 2017, Vancouver, Canada, June 2017
\medskip
\noindent Caldwell DJ, “Behavioral and neural differences between haptic stimulation and direct cortical stimulation in humans: implications for neuroprosthetics”, 7th International BCI Meeting, Workshop: Perception of Sensation Restored through Neural Interfaces, Asilomar, CA, May 2018
\medskip
{ "alphanum_fraction": 0.7986212382, "avg_line_length": 165.003125, "ext": "tex", "hexsha": "80f3114ece1938041b913f93495083f18f24e2c7", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "23566bd16e572e059c6a1b9dc423ef2e3e8e2db9", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "davidjuliancaldwell/ThesisText", "max_forks_repo_path": "responseTimingChap.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "23566bd16e572e059c6a1b9dc423ef2e3e8e2db9", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "davidjuliancaldwell/ThesisText", "max_issues_repo_path": "responseTimingChap.tex", "max_line_length": 1907, "max_stars_count": null, "max_stars_repo_head_hexsha": "23566bd16e572e059c6a1b9dc423ef2e3e8e2db9", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "davidjuliancaldwell/ThesisText", "max_stars_repo_path": "responseTimingChap.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 12187, "size": 52801 }
\section{Numeric and Mathematical Modules} % numbers -- Numeric abstract base classes % math -- Mathematical functions % cmath -- Mathematical functions for complex numbers % decimal -- Decimal fixed point and floating point arithmetic % fractions -- Rational numbers \input{sections/random} % statistics -- Mathematical statistics functions %
{ "alphanum_fraction": 0.7364130435, "avg_line_length": 36.8, "ext": "tex", "hexsha": "4af4653d3d30b05aae5fe70143c069234893efec", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2016-11-24T19:55:47.000Z", "max_forks_repo_forks_event_min_datetime": "2016-11-24T19:55:47.000Z", "max_forks_repo_head_hexsha": "dd7d6f30d945733f7ed792fcccd33875b59d240f", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "remigiusz-suwalski/programming-notes", "max_forks_repo_path": "src/python3/chapters/numeric-and-mathematical-modules.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "dd7d6f30d945733f7ed792fcccd33875b59d240f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "remigiusz-suwalski/programming-notes", "max_issues_repo_path": "src/python3/chapters/numeric-and-mathematical-modules.tex", "max_line_length": 66, "max_stars_count": 1, "max_stars_repo_head_hexsha": "dd7d6f30d945733f7ed792fcccd33875b59d240f", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "remigiusz-suwalski/programming-notes", "max_stars_repo_path": "src/python3/chapters/numeric-and-mathematical-modules.tex", "max_stars_repo_stars_event_max_datetime": "2022-02-28T05:03:18.000Z", "max_stars_repo_stars_event_min_datetime": "2022-02-28T05:03:18.000Z", "num_tokens": 69, "size": 368 }
\documentclass[12pt]{article}
\usepackage{geometry}
\geometry{left=1in,right=0.75in,top=1in,bottom=1in}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Replace ABCDEF in the next line with your chosen problem
% and replace 1111111 with your Team Control Number
\newcommand{\Problem}{E}
\newcommand{\Team}{2102362}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{newtxtext}
\usepackage{amsmath,amssymb,amsthm}
\usepackage{indentfirst}
\usepackage{gensymb}
\usepackage{setspace}
\usepackage{subfigure}
\usepackage{listings}
\usepackage{bm}
\usepackage{float}
\usepackage{newtxmath} % must come after amsXXX
\usepackage{textcomp}
\usepackage{graphicx}
\usepackage{cite}
\usepackage{xcolor}
\usepackage{fancyhdr}
\usepackage{booktabs}
\usepackage{multirow}
\usepackage{makecell}
\usepackage{epstopdf}
\usepackage{hyperref}
\usepackage{titlesec}
\usepackage{titletoc}
\usepackage{appendix}
\usepackage{url}
\usepackage[final]{pdfpages}
\linespread{1}
\setlength{\parskip}{0.5\baselineskip}
\setlength{\parindent}{2em}
\lhead{Team \Team}
\rhead{}
\cfoot{}
\newtheorem{theorem}{Theorem}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{definition}{Definition}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\setlength{\headheight}{15pt}
\begin{document}
\graphicspath{{.}} % Place your graphic files in the same directory as your main document
\DeclareGraphicsExtensions{.pdf, .jpg, .tif, .png}
\thispagestyle{empty}
\vspace*{-16ex}
\centerline{\begin{tabular}{*3{c}}
\parbox[t]{0.3\linewidth}{\begin{center}\textbf{Problem Chosen}\\ \Large \textcolor{black}{\Problem}\end{center}}
& \parbox[t]{0.3\linewidth}{\begin{center}\textbf{2021\\ MCM/ICM\\ Summary Sheet}\end{center}}
& \parbox[t]{0.3\linewidth}{\begin{center}\textbf{Team Control Number}\\ \Large \textcolor{black}{\Team}\end{center}} \\
\hline
\end{tabular}}
%%%%%%%%%%% Begin Summary %%%%%%%%%%%
\begin{center}
{\Large \quad Prior Factors of a Food System: Money or Balance? \newline Case Studies on China, USA, and Ethiopia }

\vspace{1em}
{\large \textbf{Summary}}
\end{center}
\vspace{-0.5em}

A \textbf{food system} is an integrated concept reflecting human activities in the production and acquisition of food. A food system is strongly associated with other systems such as the economic, ecological, and social systems. Nowadays, the stability of a food system is of greater and greater significance for building a harmonious society and promoting national development. However, at present the global food system works mainly under the control of the market, where people in the system blindly pursue high efficiency and high profitability, causing problems including the imbalance of distribution and environmental degradation.

The aim of this report is to \textbf{build a re-optimization model} to evaluate the result of reordering the significance of different \textbf{indicators}. These indicators are efficiency, profitability, equity, and sustainability. We also establish a \textbf{predictive model} to reflect the change of the food system over time. Besides, we conduct case studies and discuss the completeness of the model.

For the re-optimization model, we collect data on 18 indices in 3 countries from 2000 to 2020, and use these indices to build four models for the four indicators. In this part, we first adopt the \textbf{logistic model} to predict the population change. Then we build a differential equation for finding efficiency, and use the \textbf{Runge-Kutta method} to get the numerical solution.
According to the results, we build a model for evaluating equity. Next, in the model for evaluating profitability, we use \textbf{linear programming} to ensure that the data we use are within a reasonable range. Finally, we use \textbf{linear} and \textbf{nonlinear fitting} methods to evaluate sustainability.

For the prediction model, we normalize all the data we derived, and then obtain the values of the weights through the \textbf{analytic hierarchy process (AHP)}. We assess the validity of the weights to prove the correctness of the comparison matrix. We also provide calculations of the final prioritized food system index $PFSI$ by doing a special weighted summation. The result of $PFSI$ shows the food system evaluation results under a certain degree of emphasis for the four indicators. If $PFSI$ is closer to 0, then the focus of the food system we analyze is more on efficiency and profitability. If the result is closer to 1, then the focus is more on sustainability and equity.

For the discussion section, we first use the data we collected for the prediction of the food system after re-optimization. Then, we discuss the benefits and costs of changing the order of the four indicators. We apply our model to \textbf{China}, the \textbf{USA}, and \textbf{Ethiopia} for case studies and analyze the different consequences for developing countries and developed countries. Moreover, we pay attention to the scalability and adaptability of the model. Finally, we discuss the strengths and potential improvements of the food system model.

\vspace{2em}
\textbf{Keywords}: Food system, Analytic hierarchy process (AHP), Logistic model, Linear Programming, Runge-Kutta method, Nonlinear fitting, Linear fitting
% to here
%%%%%%%%%%% End Summary %%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\clearpage
\pagestyle{fancy}
% Uncomment the next line to generate a Table of Contents
\newpage
\setcounter{page}{1}
\rhead{Page \thepage\ }
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\titlecontents{section}[0cm]{\fontsize{12pt}{\baselineskip}\selectfont}{\hspace*{3em}\contentslabel{2em}\ }%
{}{\titlerule*[0.5pc]{$\cdot$}\contentspage\hspace*{1cm}\vspace*{-0.3em}}%
\titlecontents{subsection}[1cm]{\fontsize{10pt}{\baselineskip}\selectfont}{\hspace*{3em}\contentslabel{2em}\ }%
{}{\titlerule*[0.5pc]{$\cdot$}\contentspage\hspace*{1cm}\vspace*{-0.5em}}%
\titlecontents{subsubsection}[2cm]{\fontsize{10pt}{\baselineskip}\selectfont}{\hspace*{3em}\contentslabel{2em}\ }%
{}{\titlerule*[0.5pc]{$\cdot$}\contentspage\hspace*{1cm}\vspace*{-0.6em}}%
\tableofcontents
\newpage
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Introduction}
\vspace{-1em}
\subsection{Background}
The food system is conceived as a set of human activities from food production to processing, distribution, and consumption \cite{cite:Prior}. Its complexity shows up in its manifold drivers, feedbacks, outcomes, and relations with other human systems, and its concept is constantly enriched as humans pay increasing attention to its connotation. From a macroscopic perspective, the current analysis of food systems is strongly connected to the topic of \textbf{food security}, which refers to the goal that everyone in the world can get access to sufficient and nutritious food to satisfy their dietary needs \cite{cite:Concept}.
For example, a scientific group for the 2021 United Nations food systems summit suggests that the ability of the food system to enable every human to be well-nourished should be the core of the overall action tracks, which can be recognized as a pursuit of sustainability and equity \cite{cite:Summit}.
\vspace{-1em}
\begin{figure}[H]
\centering
\includegraphics[width = 0.5\textwidth]{figure/FS.pdf}
\vspace{-1em}
\caption {Sustainable food systems \cite{cite:fig}}
\end{figure}
\vspace{-1em}
However, due to the leading role of the market, at present the primary objective of the global food system is maximum efficiency along with the highest profit \cite{cite:Market}. Market control of trade and prices, together with inherent regional inequities, makes it hard to satisfy the basic food demands of many kinds of vulnerable groups. As a result, distribution problems such as food waste and food shortage have emerged. For example, in 2016 about 10.7\% of the people in the world suffered from malnutrition, but at the same time about 1/3 of all the food produced was wasted \cite{cite:USDA}. Furthermore, negative interactions between the food system and other correlated systems, such as the environmental and economic systems, make the dilemma of the food system harder to solve.
\vspace{-1em}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Restatement of the Problem}
To ensure a more stable food system with a more comprehensive selection of different objectives, an optimization model must be established to improve the current conditions of the food system. As a consultant team for the International Comestibles Management (ICM) Committee, we are going to implement the following tasks:
\begin{itemize}
\item Find the factors related to the food system, determine the objectives of our model, and choose representative indicators.
\item According to the indicators we fix, build a comprehensive evaluation model for the current food system.
\item Re-optimize the food system given a more preferable objective, compare the ideal food system with the current one, and then analyze the feasibility of improving the food system.
\item Discuss the trade-offs of optimization, including both benefits and costs of the change.
\item Apply the model to countries with different levels of development and compare the results.
\item Analyze the scalability and adaptability of the food system model.
\end{itemize}
\vspace{-1em}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Our Work}
Our efforts can be summarized in the following flow chart.
\begin{figure}[H]
\centering
\includegraphics[width = 1\textwidth]{figure/procedure.pdf}
\caption{Structure of the model and paper}
\label{procedure}
\end{figure}
\section{Assumptions and Justifications}
We make the following assumptions in the modelling process.

\noindent \textbf{Assumption 1.} \textit{Country is the unit of a food system.}
Since the problem asks for a comparison between developed and developing countries, we assume that our evaluation of food systems is conducted at the country level, and that the model only synthesizes the consideration of profitability, efficiency, equity, and sustainability for each food system.

\noindent \textbf{Assumption 2.} \textit{Imports and exports among countries are neglected.}
We do not consider the import and export of food among countries. In addition, we assume that food is distributed equally within the country we study.
Since, based on the data provided by FAO, almost all major countries in the world have a food self-sufficiency rate larger than 0.8 \cite{cite:Self_support}, we regard each country we analyze as an independent unit whose residents produce and market food entirely by themselves.

\noindent \textbf{Assumption 3.} \textit{All the data is reliable and precise.}
We assume that the data we collected from databases are reliable. They are from major websites including FAO \cite{cite:FAO_data}, the World Bank \cite{cite:Worldbank}, Statista \cite{cite:Statista}, and Worldometers \cite{cite:Worldometers}.

\noindent \textbf{Assumption 4.} \textit{Unpredictable natural disasters and other sudden changes will not happen.}
Unforeseen natural hazards such as droughts and floods would affect the food system as well as its indices dramatically, so we assume that within the prediction period there are no sudden far-reaching changes.

\noindent \textbf{Assumption 5.} \textit{The change in the population of a country is continuous.}
Since the indices of the model are based on the population of a country, we assume that there are no mass migrations or wars that could affect the population of the country.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Model Preparation}
\subsection{Notation}
In this paper, we define some symbols and parameters, and their notations are shown in Table \ref{tab:notation}.
\begin{table}[!h]
\centering
\caption{\label{tab:notation} Notations}
\vspace{1em}
\begin{tabular}{lll}
\toprule
Symbols & Description & Units \\
\midrule
$t$ & time & year \\
$PM$ & Policy motivation, an adjustable constant & 1 \\
$UR_{2020}$ & Undernourishment rate in 2020 & \% \\
$OR_{2020}$ & Obesity rate in 2020 & \% \\
$p(t)$ & Population & person\\
$p_{2020}$ & Population in 2020 & person \\
$S_m(t)$ & Target total yield in year $t$ & ton \\
$S(t)$ & Total yield & ton \\
$S_{2020}$ & Total yield in 2020 & ton \\
$s_1(t)$ & Target total yield of cereal in year $t$ & ton\\
$s_2(t)$ & Target total yield of fruit in year $t$ & ton\\
$s_3(t)$ & Target total yield of oil crops in year $t$ & ton\\
$s_4(t)$ & Target total yield of vegetables in year $t$ & ton\\
$s_5(t)$ & Target total yield of dairy in year $t$ & ton\\
$s_6(t)$ & Target total yield of meat in year $t$ & ton \\
$PR_i(t)$ & The corresponding average prices for six different foods in year $t$ & \$/ton \\
$IF$ & Average inflation rate of the studied country from 2000 to 2020 & \%\\
$IN(t)$ & The annual income from food sales in year $t$ & 100000 \$ \\
$GHG(t)$ & Total greenhouse gas emissions from agriculture in year $t$ & CO$_2$ equivalent \\
$r_1$ & Weight of efficiency & 1 \\
$r_2$ & Weight of equity & 1 \\
$r_3$ & Weight of profitability & 1 \\
$r_4$ & Weight of sustainability & 1 \\
$rs$ & Relative significance of \textit{EQ} (or \textit{SU}) with respect to \textit{EF} (or \textit{PF}) & 1\\
$EF(t)$ & Score of efficiency in the food system & 1 \\
$EQ(t)$ & Score of equity in the food system & 1 \\
$PF(t)$ & Score of profitability in the food system & 1 \\
$SU(t)$ & Score of sustainability in the food system & 1 \\
$PFSI(t)$ & Prioritized food system index & 1 \\
\bottomrule
\end{tabular}
\end{table}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Model Design}
To complete our model for the food system, we use $EF$, $EQ$, $PF$, and $SU$ to denote the score of efficiency, the score of equity, the score of profitability, and the score of sustainability, respectively. Then the final food system index $PFSI$ can be derived by a special weighted summation process.
\subsection{Preparation: Classic Population Logistic Prediction Model}
To predict the population in the future for each year, we searched for possible population forecasting models in the database. We choose the \textbf{logistic model} to complete the prediction \cite{cite:logi}, since compared with the \textbf{Malthus model} it allows us to consider constraints from natural resources, environmental capacity, and policies by giving a changing population growth rate $r$. We use the \textbf{linear least square method} to obtain the initial value $r_0$ and the maximum population $P_{max}$. The solution procedure is shown below.
\begin{enumerate}
\item Collect population data of China, USA, and Ethiopia from 2000 to 2020.
\item Suppose the population growth rate $r$ is a function varying with population $P$: $r(P) = r_0 - s\cdot P$, where $s = r_0/P_{max}$. The equation of the logistic model is given by
$$\frac{\mathrm{d} P}{\mathrm{d} t} = r_0 (1-\frac{P(t)}{P_{max}})P(t).$$
\item Get the difference equation by backward difference:
$$\frac{P(k)-P(k-1)}{P(k)} = r_0 - s\cdot P(k),\text{ }k = 2, 3, \cdots, 21.$$
\item Fit the parameters $r_0$ and $P_{max}$ by the linear least square method. The results for China, the USA, and Ethiopia are shown in Table \ref{tab:pplt} below.
\begin{table}[H]
\centering
\caption{\label{tab:pplt} Values of $r_0$ and $P_{max}$}
\vspace{1em}
\begin{tabular}{ccc}
\toprule
 & $r_0$ & $P_{max}$ \\
\midrule
China & -0.0219 & $1.83\times 10^9$ \\
USA & -0.0332 & $0.408\times 10^9$ \\
Ethiopia & -0.0381 & $0.328\times 10^9$ \\
\bottomrule
\end{tabular}
\end{table}
\item Use the separation of variables method for the differential equation to derive the analytical solution
$$P(t) = \frac{P_{max}}{1+(\frac{P_{max}}{P_{2020}}-1)\cdot e^{r_0(t-2020)}}.$$
\end{enumerate}
The results of our fitting for the populations of the USA and China are shown in Figures \ref{fig:USA_pplt} and \ref{fig:China_pplt}.
\begin{figure}[htbp]
\centering
\begin{minipage}[t]{0.48\textwidth}
\centering
\includegraphics[width=0.9\textwidth]{figure/model/USA/USA_pplt.eps}
\caption{Logistic Fit of Population of the USA.\label{fig:USA_pplt}}
\end{minipage}
\begin{minipage}[t]{0.48\textwidth}
\centering
\includegraphics[width=0.9\textwidth]{figure/model/China/China_pplt.eps}
\caption{Logistic Fit of Population of China.\label{fig:China_pplt}}
\end{minipage}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Model for evaluating \textit{EF}}
\label{sec:EF}
The Stockholm Environment Institute defines the efficiency of a food system as ``the ratio of outputs to inputs'' \cite{cite:Efficiency_1}. However, estimating the input and output factors of different kinds of food requires a huge set of data from measurements and experiments, and in all the literature we read no one has even tried to explore the overall efficiency of a complex food system. We therefore select the sum of the annual yields of different foods as an indicator for efficiency. The selection of this indicator refers to the work of \cite{cite:Esti_efficiency}. In order to calculate the value of efficiency in the food system we choose, we take the following steps:
\begin{itemize}
\item \textbf{Find the total yield.} Based on the food pyramid provided by a Harvard research \cite{cite:Food_pyramid}, we divide food into six groups: cereal, fruit, oil crops, vegetables, dairy, and meat. The total food yield is given by Formula \eqref{eq:yield} below.
\begin{align}
\label{eq:yield}
S = \sum^6_{i=1}s_i, \text{ where }s_i\text{ is the yield of the $i$-th food.}
\end{align}
\item \textbf{Determine the target yield in year $t$.} Considering the undernourishment rate and the obesity rate among adults, the value of the target yield in year $t$, $S_m(t)$, can be obtained by Formula \eqref{eq:target} below.
\begin{align}
\label{eq:target}
S_m(t) = \frac{S_{2020}/(1+OR_{2020}-UR_{2020})}{p_{2020}}\cdot p(t).
\end{align}
\item \textbf{Establish the equation for the yield $S(t)$.} In the equation we introduce a coefficient $PM$, called policy motivation, to reflect the regulation of priorities. First, our aim is to ensure that the larger $PM$ is, the faster the yield, and hence the efficiency $EF$, changes. Second, we anticipate that if the current food yield $S$ is larger than the target yield $S_m$, $S(t)$ will decrease with respect to time $t$, and if the current $S$ is less than the target yield $S_m$, $S(t)$ will increase with respect to time $t$. Third, the first two changes should be applied to the food yield $S(t)$ itself. Then we obtain Equation \eqref{eq:logi}, which is similar to the equation of the logistic model, but \textbf{the denominator in the middle factor is now a variable, instead of a constant}:
\begin{equation}
\left\{
\begin{aligned}
\label{eq:logi}
& \frac{\mathrm{d} S}{\mathrm{d} t} = PM\cdot (1-\frac{S(t)}{S_m(t)})\cdot S(t) \\
& S_{\text{initial}} = S_{2020} \\
\end{aligned}
\right.
\end{equation}
\item \textbf{Solve the differential equation.} In order to obtain the numerical solution of this differential equation, we adopt the \textbf{Runge-Kutta method} for the initial-value problem \cite{cite:Runge}. First, we separate the interval $[2020, 2050]$ (indicating the years from 2020 to 2050 as our evaluation range) into $N$ sub-intervals $[t_n, t_{n+1}]$ ($n = 0, 1, \cdots, N-1$). Then, by the mean value theorem for integrals we have
\begin{align}
S(t_{n+1}) - S(t_n) = \int^{t_{n+1}}_{t_n} f(t,S(t)) \mathrm{d}t = (t_{n+1}-t_{n}) f(\xi, S(\xi)), \text{ where }\xi\in [t_n, t_{n+1}] \text{ \cite{cite:Runge}},
\end{align}
with $f(t,S)$ denoting the right-hand side of Equation \eqref{eq:logi}. Finally, by approximation we get
\begin{align}
S_{n+1} -S_n = (t_{n+1}-t_n) \sum^m_{i = 1} c_i f(\xi_i, S(\xi_i)), \text{ where we choose }m = 4,\ c_1 = c_4 =1/6,
\end{align}
and $c_2 = c_3 = 1/3$.
\item \textbf{Normalize the result.} After obtaining the results for the total yield, we rate the values by applying a \textbf{min-max normalization}. The form is shown in Formula \eqref{eq:Normal}.
\begin{align}
\label{eq:Normal}
EF(t) = \frac{S(t)-S_{\min}}{S_{\max}-S_{\min}}
\end{align}
\end{itemize}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Model for evaluating \textit{EQ}}
According to the literature found in the databases \cite{cite:Equity_def, cite:Equity_def2}, equity problems in the food system include racism, the gap between the rich and the poor, gender inequality, etc. In our model for the food system, since we take each country as a unit, we aim to regard a country as a whole and to find the differences between countries. Therefore, for all studied objects, we assume that the categories in the dietary structure are fully covered by the types of agricultural products produced, which is actually mostly the case, so the countries we selected can be \textbf{self-sufficient} in food categories. We then come up with a method making full use of the outcomes of Subsection \ref{sec:EF}. We use \textbf{the ratio of supply and demand $S(t)/S_m(t)$} to denote the level of equity.
The interpretation comes from the fact that if the current total yield is close to the target yield, then everyone in the country has a chance to obtain the necessary amount of food: the supply is neither too excessive (which would imply that some people in the country possess more food than they need) nor too insufficient (which would imply that some people in the country possess less food than they need). Finally, we normalize the result with the same procedure as shown in Subsection \ref{sec:EF}.
%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Model for evaluating \textit{PF}}
\label{sec:pf}
The profitability of the food system can be defined as the \textbf{total income} obtained by selling all the food at the respective prices. Besides, the predicted prices should be bounded within a reasonable range to prevent price mutations in a certain year. We assume that people are rational enough to always pursue the maximum profit. Therefore, we use \textbf{linear programming} to solve this problem. Formula \eqref{eq:profit} shows the calculation form. For the constraints of the linear programming problem, we restrict the yield share of each food to lie \textbf{within its interval over the most recent five years}, so that we can ensure the continuity of the yields. For the future price of each kind of food, we apply a \textbf{linear fit} for prediction.
\begin{gather}
\max \quad \sum_{i=1}^{6} (s_i \frac{PR_i}{IF}), \notag\\
\mbox{s.t.} \quad \sum_{i=1}^{6} s_i =S(t),\label{eq:profit} \\
\min \{ \frac{s_k(t-j)}{S(t-j)} \} \leq \frac{s_k(t)}{S(t)} \leq \max \{ \frac{s_k(t-j)}{S(t-j)} \}, \quad j=1,2,3,\ldots,5, \quad k = 1,2,3,\ldots,6. \notag
\end{gather}
Finally, we normalize the result with the same procedure as shown in Subsection \ref{sec:EF}.
\subsection{Model for evaluating \textit{SU}}
According to Holden et al. \cite{cite:Sus1}, the excessive consumption of fossil fuel is one of the most severe influences current food systems have on the natural environment, and it renders the food system unsustainable. Hence, we choose the total emissions from agriculture in a country as an indicator of its sustainability. Considering the fact that the larger the food production is, the more emissions from agriculture there will be, we take the quotient, i.e., the emissions per unit of yield, to reflect the performance of sustainability. We should also take the policy motivation into account:
\begin{itemize}
\item Before the policy change, we perform \textbf{linear fittings} of the collected data of greenhouse gas emissions and of the total yield per year, respectively. Figure \ref{fig:lf_E_yield_GHG} shows the fitting performance for the greenhouse gas emission and the annual yield. The coefficients of determination of the linear fits are $0.9706$ and $0.9603$ respectively, which are very close to the ideal value $1$.
Equations \eqref{eq:lf_E_GDG} and \eqref{eq:lf_E_yield} show the fitting results.
\begin{figure}[htbp]
\centering
\subfigure[Linear Fit of the Annual Yield of Ethiopia.]{\includegraphics[width=0.48\textwidth]{figure/model/Ethiopia/Ethiopia_yield_fit.eps}}
\subfigure[Linear Fit of the Greenhouse Gas Emission of Ethiopia.]{\includegraphics[width=0.48\textwidth]{figure/model/Ethiopia/Ethiopia_GHG_fit.eps}}
\caption{Linear Fits of the Annual Yield and the Greenhouse Gas Emission of Ethiopia.\label{fig:lf_E_yield_GHG}}
\vspace{0.2in}
\end{figure}
\begin{gather}
GHG(t) = a\cdot t - b, \text{ where } a = 2576,\ b = 5.049\times10^6 \label{eq:lf_E_GDG}\\
S(t) = c\cdot t - d, \text{ where } c = 1.228\times10^6,\ d = 2.443\times10^9 \label{eq:lf_E_yield}
\end{gather}
Then we have the expression below:
$$\frac{GHG(t)}{S(t)} = \frac{a t-b}{c t-d}.$$
\item \textbf{Modify the equation.} After the policy change, we take $PM$ into account, and update the expression as below (supposing the policy change occurs in 2020):
$$\frac{GHG(t)}{S(t)} = \frac{a (t-2020) + (2020 a -b)(1-PM)}{c (t-2020) + (2020 c - d)(1-PM)},$$
from which we should notice that the new function has \textbf{the same asymptotic line as the old one} (which is $y = a/c$ as $t\to \infty$), and that the two functions \textbf{intersect at \textit{t = 2020}}. The new function also indicates that \textbf{when \textit{PM} = 0 the two functions are equal}, and given a stronger policy motivation $PM$ the new function will drop more steeply.
\end{itemize}
After we predict the value of sustainability for each year, we normalize the result with the same procedure as shown in Subsection \ref{sec:EF}.
\subsection{Model for evaluating \textit{PFSI}}
We obtain the prioritized food system index $PFSI$ by making a weighted summation of $EF$, $EQ$, $PF$, and $SU$. The values of the weights are obtained through the \textbf{Analytic Hierarchy Process (AHP)}. To reasonably indicate the main focus of the food system, we define Formula \eqref{eq:PFSI} below to find the value of $PFSI$:
\begin{align}
\label{eq:PFSI}
PFSI(t) = (1-EF(t))\cdot r_1 + (1-PF(t))\cdot r_3 + EQ(t)\cdot r_2 + SU(t)\cdot r_4
\end{align}
It is suggested that \textbf{when \textit{PFSI} is closer to 0 the focus of the food system is more on \textit{EF} and \textit{PF}}, and \textbf{when \textit{PFSI} is closer to 1 the focus of the food system is more on \textit{SU} and \textit{EQ}}.
% figure?
The steps of calculating the weights by \textbf{AHP} are shown below:
\begin{itemize}
\item We first determine the relative significance of the four factors (\textit{EF}, \textit{PF}, \textit{EQ}, and \textit{SU}). Specifically, \textit{EF} and \textit{PF} are considered to have the same significance, and \textit{EQ} and \textit{SU} are considered to have the same significance. We set the significance of \textit{EQ} (or \textit{SU}) with respect to \textit{EF} (or \textit{PF}) to be $rs$. Then, the comparison matrix $A$ (Equ. \eqref{eq:comM}) is set up.
\begin{equation}
A=\left[\begin{array}{cccc}
1 & 1 & 1/rs & 1/rs \\
1 & 1 & 1/rs & 1/rs \\
rs & rs & 1 & 1 \\
rs & rs & 1 & 1
\end{array}\right]
\label{eq:comM}
\end{equation}
In the case study (Sec. \ref{sec:case}), we set $rs = 7$. However, the absolute value of $rs$ does not affect the general shape of the final result, which will be verified in the sensitivity analysis (Sec. \ref{sec:sense}).
\item Having calculated the eigenvalues of matrix $A$, we can further determine the weights of \textit{EF}, \textit{PF}, \textit{EQ}, and \textit{SU} by normalizing the eigenvector of the maximum eigenvalue.
Namely, the weight vector can be calculated through Equ. \eqref{eq:AHPweight}, where $\mathbf{v}$ is the eigenvector corresponding to the maximum eigenvalue.
\begin{equation}
\mathbf{w} = \frac{\mathbf{v}}{\|\mathbf{v}\|_1} = \frac{\mathbf{v}}{\sum_{i=1}^4|v_i|}
\label{eq:AHPweight}
\end{equation}
\item To prevent possible conflicts caused by an arbitrarily set significance, we need to assess the validity of the weights. We define the consistency index $CI$ as
\begin{equation}
CI = \frac{\lambda - 4}{4-1} = \frac{\lambda - 4}{3},
\end{equation}
where $\lambda$ is the maximum eigenvalue of the comparison matrix $A$. Then, the consistency ratio $CR$ can be calculated through
\begin{equation}
CR = \frac{CI}{RI},
\end{equation}
where $RI$ is the random consistency index. Here, we have 4 factors in total, so $RI=0.9$. In general, if $CR<0.1$, the weights obtained from AHP are valid.
\end{itemize}
\section{Discussion}
\subsection{Task 1: Re-Optimization for the Food System}
Since there are interactions among the models for efficiency, profitability, sustainability, and equity, after the optimization for equity and sustainability we need to examine the consequences of changing their weights. We consider the situation in different countries.
\begin{itemize}
\item \textbf{For a country in which the undernourishment rate is less than the obesity rate, we claim that the total food production is enough to satisfy the needs of the whole population.} The country produces food surpluses. As a result, under the influence of policy motivation, the country will tend to produce exactly the amount of food it needs, so that the total yield of food \textbf{will decrease at first}, causing a decline of $EF$. Once this intermediate state is reached, the prediction of the level of efficiency will depend on the population change as well. In the countries where the population is growing continuously, the annual food yield should try to catch up with the population, and accordingly \textbf{the total yield and $EF$ will increase from then on}. In the countries where the population is declining, the change will be in the opposite direction. Besides, the value of the level of \textbf{equity ($S/S_m$) will decrease towards 1} at first, since the food yield is large. According to the differential equation, we can see that at this moment the growth rate of the yield is 0. Hence, the value of $S/S_m$ will keep decreasing until it is less than 1. However, the slope of $S$ will then become higher until it surpasses the population growth rate, so that \textbf{in the end $S/S_m$ will approach 1}, implying that the amount of food per person is more equitable. As for the profitability, since the prices of each kind of food are not fixed, these prices are predicted by linear fitting. However, since we use the yield intervals of the most recent five years in the linear programming, we may infer from the expression that \textbf{the level of profitability will change smoothly}. Another supporting argument is that we aim to find the maximum income for each year, so that the \textbf{food system still seeks the maximum benefit even when sustainability is considered}. For the prediction of the change of sustainability, we can find that the \textbf{greenhouse gas emissions keep increasing in developing countries} like China, and remain nearly constant in developed countries like the USA. Then, after exerting the policy motivation, \textbf{the value of $GHG(t)/S(t)$ will decrease faster than in the case before re-optimization}.
However, in our model the long-run level of sustainability will be the same as before, because the two functions have the same asymptotic line. This reflects that the model loses some accuracy for very long term predictions.
\item \textbf{For a country in which the undernourishment rate is higher than the obesity rate}, the total food production is not enough. In this case, the total yield will be raised to meet the residents' need for food. If the population growth rate is larger than $PM$, \textbf{$S/S_m$ will at first be less than 1 and then approach 1}. In the end, the population reaches its limit, and correspondingly the yield will approach approximately a constant. On the contrary, \textbf{if the population growth rate is less than $PM$}, then $S/S_m$ will decline, due to the fact that the yield does not catch up with the rate of population growth. When the population growth rate begins decreasing, $S/S_m$ will rise again until it levels off at 1. The analysis of profitability and sustainability is the same as above.
\end{itemize}
\textbf{We define 0.5 as the balance value for efficiency, profitability, equity, and sustainability.} It means that we regard them as equally important. According to our model, the time that a food system needs for the re-optimization should be counted from the occurrence of the policy change to the moment when $PFSI$ reaches 0.5 from either side for the first time. The interpretation is that when $PFSI > 0.5$, indicators including sustainability and equity become more important than efficiency and profitability.
%%%%%%%%%%%%%%%%%%%%%
\subsection{Task 2: Benefits and Costs}
We only consider the case of changing the priorities of a food system from efficiency and profitability to sustainability and equity. The opposite direction will give the contrary result.
\subsubsection{Potential benefits}
\begin{itemize}
\item \textbf{Distribution becomes more reasonable within developed countries, and will first receive a drop in some developing countries, but will later recover.} When we change the prioritization of the food system to sustainability and equity, it is obvious that \textbf{developed countries will tend to distribute their food more fairly} among their residents, because the equity is always approaching 1. However, some developing countries, Ethiopia for example, first receive a drop in equity. This can result from the fact that the production growth rate fails to exceed the population growth rate at first. But later, the equity of developing countries will approach 1 again as the production rate increases and the population growth rate decreases.
\item \textbf{Under the motivation of policy, sustainability becomes stronger for both developed countries and developing countries.} The policy also \textbf{limits the increase of greenhouse gas emissions} even if the total yield grows. Such an effect becomes \textbf{most apparent in developed countries}, where the obesity rate is much larger than the undernourishment rate and the yield generally decreases. In developing countries with a large undernourishment rate, yield growth is also limited under stronger sustainability, so that the atmosphere and environment are protected and used more efficiently.
\end{itemize}
\subsubsection{Potential costs}
\begin{itemize}
\item \textbf{Developing countries will have a harder time coping with the increase of the population.} Some developing countries, whose obesity rate is slightly larger than the undernourishment rate, will first drop their yields to fit the current population and then try to keep pace with the population growth. Compared with such a ``following'' strategy, countries that try to increase their yields first to cope with the future population will be more prepared.
\item \textbf{Profit from the food industry will always decrease, regardless of whether the country is developed or developing.} In the later case study section, we illustrate the change of profitability, and those charts show that both developed and developing countries suffer a profit loss. On one hand, this can result from the yield drop in some developed countries. On the other hand, some developing countries have a relatively large inflation rate, causing the actual profit to become less valuable.
\end{itemize}
\subsubsection{Prediction on the occurrence of the benefits and costs}
Here, we first define \textbf{the occurrence of the previously mentioned benefits and costs as the time when equity is within the interval [0.95, 1.05] and is continuously and monotonically increasing or decreasing towards 1}. With that definition and referring to the findings in Section \ref{sec:case}, we find that China's occurrence time happens in approximately the year 2045 and Ethiopia's in approximately the year 2025, while America's occurrence time happens in approximately the year 2051. Therefore, with these experimental results, we can qualitatively conclude that \textbf{the occurrence time of developed countries is later than that of developing countries under the same policy motivation (PM)}.
%%%%%%%%%%%%%%%%%%%%%
\subsection{Task 3: Case Studies}\label{sec:case}
For the case studies, we need to utilize the prediction results for food prices and greenhouse gas emissions. Here we only show the plots of the scores for each indicator, obtained by using the method mentioned before.
\subsubsection{China: Developing Country with $UR_{2020}$ relatively less than $OR_{2020}$}
China is a representative developing country whose economic situation is very close to that of some developed countries. Specifically, the undernourishment rate of China is much smaller than the obesity rate in the year 2020. This fact reflects a high possibility that the food China produces exceeds the demand of its people. However, as a developing country, its population is still in a quickly rising period. Thus, \textbf{the current supply of food cannot satisfy the demand of its future maximum population}. To satisfy the food demand of the current population, \textbf{the yield will first receive a slight drop until the year 2035. But after that, the yield will continuously increase to follow the increase of the population and the corresponding demand.} Figure \ref{fig:China_yield} shows the predicted situation with policy motivation. This result is obtained from the model for evaluating \textit{EF} with \textit{PM}$=0.1$. Meanwhile, the equity of food will be improved due to a better arrangement of supply. As the annual yield is controlled by the policy, the supply gradually approaches the demand (Fig. \ref{fig:China_equity}), which ensures the domestic equity of the food system.
As shown in Figures \ref{fig:China_yield} and \ref{fig:China_equity}, the effect of the policy will reach its maximum at approximately the year \textbf{2035}, namely, 15 years after the implementation of the policy. After about \textbf{25 years} of continuous policy motivation, the re-optimized food system is finally set up. \textbf{At that time, the ratio of supply and demand is extremely close to 1 and the yield is almost constant.}
\begin{figure}[htbp]
\centering
\begin{minipage}[t]{0.48\textwidth}
\centering
\includegraphics[width=0.9\textwidth]{figure/model/China/China_yield.eps}
\caption{Annual Yield of China with Policy Interference.\label{fig:China_yield}}
\end{minipage}
\begin{minipage}[t]{0.48\textwidth}
\centering
\includegraphics[width=0.9\textwidth]{figure/model/China/China_equity.eps}
\caption{Ratio of Supply and Demand of Food of China with Policy Interference.\label{fig:China_equity}}
\end{minipage}
\end{figure}
Besides, the policy will have a \textbf{negative effect on the income} of the people serving in the food system and a \textbf{positive effect on the sustainability} of the food system. Due to the effect of inflation, the short-term decrease of yield will result in a \textbf{continuous attenuation of the income}. Using the model stated in Section \ref{sec:pf}, we can predict the income (Fig. \ref{fig:China_income}). Furthermore, the policy will also \textbf{push the development of science} in order to decrease the labor cost. This in turn improves the production methods such that less greenhouse gas emission is necessary for the same yield. The red line in Figure \ref{fig:China_GHG} shows the \textbf{accelerated decreasing trend of the greenhouse gas emission} with policy motivation.
\begin{figure}[H]
\centering
\begin{minipage}[t]{0.48\textwidth}
\centering
\includegraphics[width=0.9\textwidth]{figure/model/China/China_profit.eps}
\caption{Income of the Food System of China with Policy Interference.\label{fig:China_income}}
\end{minipage}
\begin{minipage}[t]{0.48\textwidth}
\centering
\includegraphics[width=0.9\textwidth]{figure/model/China/China_sustainability.eps}
\caption{Greenhouse Gas Emission per Unit Yield of the Food System of China with Policy Interference.\label{fig:China_GHG}}
\end{minipage}
\end{figure}
Finally, with the economic, social, scientific, and environmental factors taken into account, the food system in China will prioritize equity and sustainability \textbf{after 15 years} of implementation of the policy. With \textbf{10 more years} of development, the benefits and the costs of the new food system will be manifest. Figure \ref{fig:China_score} shows the change of the evaluated \textit{PFSI} with \textit{EF}, \textit{PF}, \textit{EQ}, and \textit{SU} marked. Figure \ref{fig:China_radar} shows the standardized scores of \textit{EF}, \textit{PF}, \textit{EQ}, and \textit{SU} in 2021, 2035 (15 years after the policy's implementation), and 2045 (25 years after the policy's implementation).
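For concreteness, the yield and equity curves above can be reproduced by numerically integrating Equation \eqref{eq:logi} with the classical fourth-order Runge-Kutta scheme described in Subsection \ref{sec:EF}. The following sketch is illustrative only: the values of $S_{2020}$, $p_{2020}$, $UR_{2020}$, $OR_{2020}$, and the logistic population parameters below are placeholders rather than the fitted values used in our case studies.
\begin{lstlisting}[language=Python]
import numpy as np

# Illustrative placeholder parameters (not the fitted values of this paper)
PM = 0.1                       # policy motivation
S2020 = 1.0e9                  # total yield in 2020 [ton]
p2020 = 1.4e9                  # population in 2020
P_max, r0 = 1.83e9, -0.0219    # logistic population parameters
UR, OB = 0.02, 0.06            # undernourishment / obesity rates in 2020

def population(t):
    # Analytical logistic solution P(t) from the preparation subsection
    return P_max / (1.0 + (P_max / p2020 - 1.0) * np.exp(r0 * (t - 2020)))

def S_target(t):
    # Target yield S_m(t), scaled by the 2020 over/under-nutrition rates
    return (S2020 / (1.0 + OB - UR)) / p2020 * population(t)

def dS_dt(t, S):
    # Right-hand side of the yield equation with policy motivation PM
    return PM * (1.0 - S / S_target(t)) * S

# Classical RK4 with weights (1/6, 1/3, 1/3, 1/6) on [2020, 2050]
years = np.linspace(2020, 2050, 301)
h = years[1] - years[0]
S = np.empty_like(years)
S[0] = S2020
for n in range(len(years) - 1):
    t, y = years[n], S[n]
    k1 = dS_dt(t, y)
    k2 = dS_dt(t + h / 2, y + h / 2 * k1)
    k3 = dS_dt(t + h / 2, y + h / 2 * k2)
    k4 = dS_dt(t + h, y + h * k3)
    S[n + 1] = y + h / 6 * (k1 + 2 * k2 + 2 * k3 + k4)

equity = S / S_target(years)   # ratio of supply and demand (the EQ indicator)
\end{lstlisting}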
\begin{figure}[!htb]
    \centering
    \begin{minipage}[t]{0.48\textwidth}
        \centering
        \includegraphics[width = 0.8\textwidth]{figure/model/China/China_score.eps}
        \caption{\textit{PFSI} of China with All Four Factors Specified from 2015 to 2045.\label{fig:China_score}}
    \end{minipage}
    \begin{minipage}[t]{0.48\textwidth}
        \centering
        \includegraphics[width = 0.8\textwidth]{figure/radar/China_radar.pdf}
        \caption{The Change of Four Factors of China's Food System.\label{fig:China_radar}}
    \end{minipage}
\end{figure}

\subsubsection{Ethiopia: Developing Country with $UR_{2020}$ larger than $OR_{2020}$}
Ethiopia is a typical developing country with a high undernourishment rate and a low obesity rate. Therefore, the current food supply of Ethiopia is far from the demand of its possible maximum population. As we change the priority of the food system by introducing the policy motivation, the annual yield will \textbf{quickly increase to satisfy the demand of the population} (Fig. \ref{fig:Ethiopia_yield}). As for the equity, the policy motivation \textbf{inhibits the increasing speed of equity} to prevent it from exceeding the ideal value $1$.

Similar to China, the income will be affected by the policy so that it \textbf{decreases quickly at the beginning of the policy's implementation}. However, due to the large inflation rate, the \textbf{decreasing speed is much larger} than in China. Besides, the sustainability of Ethiopia will also be improved by the policy motivation.

The biggest difference between China and Ethiopia is the value of the policy motivation \textit{PM}. To obtain the same effect as in the food system of China, Ethiopia needs to \textbf{implement a much stronger policy regulation}. According to our estimation, the policy motivation of Ethiopia should be approximately 10 times that of China to get the same effect. In this case study, we choose to use the policy motivation \textit{PM}$=1$.

\begin{figure}[htbp]
    \centering
    \begin{minipage}[t]{0.48\textwidth}
        \centering
        \includegraphics[width=0.9\textwidth]{figure/model/Ethiopia/Ethiopia_yield.eps}
        \caption{Annual Yield of Ethiopia with Policy Interference.\label{fig:Ethiopia_yield}}
    \end{minipage}
    \begin{minipage}[t]{0.48\textwidth}
        \centering
        \includegraphics[width=0.9\textwidth]{figure/model/Ethiopia/Ethiopia_equity.eps}
        \caption{Ratio of Supply and Demand of Food of Ethiopia with Policy Interference.\label{fig:Ethiopia_equity}}
    \end{minipage}
\end{figure}

Finally, we calculate the curve of \textit{PFSI}. After changing the weights of efficiency, profitability, equity, and sustainability, a gap appears in the year 2020. Unlike China, the growth rate of Ethiopia's \textit{PFSI} is not as large. Instead, \textit{PFSI} \textbf{increases gradually in the beginning} and \textbf{stabilizes around the year 2030}. Namely, only \textbf{10 years of policy motivation} can bring the underdeveloped food system of Ethiopia back on the right track, the balanced mode. Figure \ref{fig:Ethiopia_radar} shows the standardized scores of \textit{EF}, \textit{PF}, \textit{EQ}, and \textit{SU} in 2021, 2035, and 2045.
\begin{figure}[!htb]
    \centering
    \begin{minipage}[t]{0.48\textwidth}
        \centering
        \includegraphics[width = 0.8\textwidth]{figure/model/Ethiopia/Ethiopia_score.eps}
        \caption{\textit{PFSI} of Ethiopia with All Four Factors Specified from 2015 to 2045.\label{fig:Ethiopia_score}}
    \end{minipage}
    \begin{minipage}[t]{0.48\textwidth}
        \centering
        \includegraphics[width = 0.8\textwidth]{figure/radar/Ethiopia_radar.pdf}
        \caption{The Change of Four Factors of Ethiopia's Food System.\label{fig:Ethiopia_radar}}
    \end{minipage}
\end{figure}

\subsubsection{The USA: Developed Country with $UR_{2020}$ much less than $OR_{2020}$}
The USA is a developed country where the undernourishment rate is very small while the obesity rate is very large. \textbf{Therefore, the current food supply of the USA even exceeds the demand of its possible maximum population.} After we change the priority to sustainability and equity, \textbf{the total yield (Fig. \ref{fig:USA_yield}) will generally drop to match the demand of the population}; around the year 2050 the yield reaches its historically lowest point. Moreover, since the equity index ($S/S_m$, Fig. \ref{fig:USA_equity}) is high at the present yield rate (about 1.4 in 2020), under the control of $PM$ (policy motivation, set to 0.1) the equity will drop to a minimum of 0.98 in approximately the year 2050 and then gradually recover to 1, so that it can compensate for the increasing food demand caused by population growth. We can conclude that cutting the unnecessary yield of food can improve the sustainability of the food system and increase equity in the long run.

\begin{figure}[htbp]
    \centering
    \begin{minipage}[t]{0.48\textwidth}
        \centering
        \includegraphics[width=0.9\textwidth]{figure/model/USA/USA_yield.eps}
        \caption{Annual Yield of USA with Policy Interference.\label{fig:USA_yield}}
    \end{minipage}
    \begin{minipage}[t]{0.48\textwidth}
        \centering
        \includegraphics[width=0.9\textwidth]{figure/model/USA/USA_equity.eps}
        \caption{Ratio of Supply and Demand of Food of USA with Policy Interference.\label{fig:USA_equity}}
    \end{minipage}
\end{figure}

Similar to China, the policy motivation will have a \textbf{negative effect on the income} of the people served by the food system due to the effect of inflation and price changes. This is reasonable after taking the decrease in food yield into consideration. Besides, the ratio $GHG(t)/S(t)$ also drops continuously with time. With the policy motivation ($PM=0.1$), \textbf{the drop is faster} compared with the slope for $PM=0$.

Finally, we calculate the curve of $PFSI$ (Figure \ref{fig:USA_score}). After changing the weights of equity, sustainability, profitability, and efficiency, a sudden drop happens in the year 2020. Then, \textbf{$PFSI$ grows continuously} while its slope continuously decreases. In approximately \textbf{the year 2027}, the score of $PFSI$ exceeds 0.5, which is the time when the food system is optimized for equity and sustainability. In approximately \textbf{the year 2040}, efficiency reaches its historically highest point and the derivative of $PFSI$ drops suddenly, which is the time when the benefits and costs become manifest. Figure \ref{fig:USA_radar} shows the standardized scores of $EF$, $PF$, $EQ$, and $SU$ in 2021, 2035, and 2045.
\begin{figure}[!htb]
    \centering
    \begin{minipage}[t]{0.48\textwidth}
        \centering
        \includegraphics[width = 0.8\textwidth]{figure/model/USA/USA_score.eps}
        \caption{\textit{PFSI} of USA with All Four Factors Specified from 2015 to 2045.\label{fig:USA_score}}
    \end{minipage}
    \begin{minipage}[t]{0.48\textwidth}
        \centering
        \includegraphics[width = 0.8\textwidth]{figure/radar/USA_radar.pdf}
        \caption{The Change of Four Factors of USA's Food System.\label{fig:USA_radar}}
    \end{minipage}
\end{figure}

%%%%%%%%%%%%%%%%%%%%%
\subsection{Task 4: Scalability and Adaptability of the Model}
\subsubsection{Scalability}
Food systems of different scales are under consideration. After analysis, we draw the following conclusions.
\begin{itemize}
    \item \textbf{The larger the food system is, the better our optimization will be}. Given a larger food system (major economies or unions of nations), the production structure of the food system will fit better with the common dietary structure of the residents. The reason is that we do not consider food trade or other food exchanges between countries, and a larger scale provides a more self-sufficient system.
    \item \textbf{Food systems at the country level suit the model best}. Since the model is designed for countries, we consider the yearly inflation rate of a country in the model of profitability. The data processing may need to be revised for systems other than countries.
\end{itemize}

\subsubsection{Adaptability}
Food systems with different properties are under consideration. Adaptability reflects the consequences of changing the object of the food system model. We have the following results.
\begin{itemize}
    \item \textbf{The model fits countries with different $PM$}. Big countries like China and the USA have a large population along with strong scientific and technological support. For countries with a smaller population and weaker policy support, however, the intensity of the policy motivation needs to be modified to reflect the difference.
    \item \textbf{The model reflects the global polarization between rich countries and poor countries}. In Section \ref{sec:EF}, we include the undernourishment rate and the obesity rate in our evaluation model. With regard to Formula \eqref{eq:logi}, if $S_{2020}$ of a country is less than $S_m$, namely the average undernourishment rate is larger than the average obesity rate, then $1-S/S_m$ will be larger than 0. This indicates that the formula adapts to countries in different conditions of wealth and poverty.
    \item \textbf{The model is suitable for comparisons among countries with similar food production capacity}. Since the costs of food production are neglected, the closer the production capacities of the countries, the more comparable the results for profitability will be.
\end{itemize}

\section{Sensitivity Analysis}\label{sec:sense}
In our model, there are two parameters whose values are set by us without derivation: the policy motivation \textit{PM} and the relative significance $rs$. \textit{PM} is set to $0.1$ in the cases of China and the USA and to $0.5$ in the case of Ethiopia. $rs$ is set to $7$ when calculating the weights of \textit{EF}, \textit{PF}, \textit{EQ}, and \textit{SU}. In this part, we argue that the results obtained from the model are not sensitive to changes in the values of \textit{PM} and $rs$.

\subsection{\textit{PM} Fluctuation Test.}
The policy motivation $PM$ represents the strength of the policy in encouraging the balanced development of the food system.
It is first used to calculate the annual yield and the equity index in our model for a prioritized food system. We re-calculate the annual yield and the equity index with different values of \textit{PM}, ranging from $0.04$ to $0.4$. The results are shown in Figure \ref{fig:sense_pm}.

\begin{figure}[!htb]
    \centering
    \subfigure[Re-calculating of Annual Yield of USA with Different Values of \textit{PM}.]{\includegraphics[width=0.48\textwidth]{figure/sense/sense_pm_yield.pdf}}
    \subfigure[Re-calculating of Equity Index of USA with Different Values of \textit{PM}.]{\includegraphics[width=0.48\textwidth]{figure/sense/sense_pm_equity.pdf}}
    \caption{Sensitivity Test for Policy Motivation \textit{PM}.\label{fig:sense_pm}}
    %\vspace{0.2in}
\end{figure}

Although the value of \textit{PM} varies dramatically, the general shape of the annual yield curve does not change much: the annual yield always decreases to a minimum and then increases toward a fixed asymptote. The same holds for the equity index (the ratio of supply and demand): no matter how $PM$ varies, it approaches 1 as the year goes to infinity. Thus, we can conclude that the selection of the value of \textit{PM} is not a significant issue in our model, since the annual yield and the equity index are convergent with respect to \textit{PM}.

\subsection{\textbf{\textit{rs}} Fluctuation Test.}
$rs$ represents the relative significance of \textit{EQ} (or \textit{SU}) with respect to \textit{EF} (or \textit{PF}). It is used to calculate the weights that produce the final score of \textit{PFSI}. To verify that changing this value does not significantly influence the final score, we test different values of $rs$, ranging from $1$ to $8$. Figure \ref{fig:sense_rs} shows the recalculated \textit{PFSI}. Although the change of $rs$ alters the weights, the final score does not change significantly, and its trend remains generally the same: \textit{PFSI} increases faster in the beginning, and the rate of increase drops at approximately the year 2040. This observation confirms that $PFSI$ is not sensitive to the value of $rs$ in our model.

\begin{figure}[h]
    \centering
    \includegraphics[width = 0.6\textwidth]{figure/sense/sense_AHP.eps}
    \caption{Sensitivity Test for $rs$ by Re-calculating the Corresponding \textit{PFSI}.\label{fig:sense_rs}}
\end{figure}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Strengths and Possible Improvements}
\subsection{Strengths}
\begin{itemize}
    \item \textbf{Practicability}: To make our model conform to reality, we divide food into six groups: cereal, fruit, oil crops, vegetables, dairy, and meat. The model considers all the components of the dietary pyramid.
    \item \textbf{Globality}: We take a macroscopic view of the current food system in the world without taking internal effects into account.
    \item \textbf{Flexibility}: By introducing a variable parameter named policy motivation ($PM$), we can adjust the priorities of efficiency, profitability, sustainability, and equity accordingly. By assigning different possible values to $PM$ (implying that the intensity of the policy motivation may vary), our prediction results can be adjusted flexibly.
\end{itemize}

\subsection{Possible Improvements}
\begin{itemize}
    \item \textbf{We do not consider imports and exports}. If, for a particular country, there is a huge difference between the total amount of food imports and exports, the deviation of our model from reality will increase.
    \item \textbf{Indicators for our model could be more comprehensive}.
In the model of equity, we could add another parameter depicting the degree of matching between the dietary structure and the production structure, so that differences in food distribution within countries can be considered. In addition, in the model of sustainability, indicators other than greenhouse gas emissions may be added to improve the completeness of the model. The cost of food production may also be included to refine the model.
\end{itemize}

\section{Conclusions}
In this paper, we first adopt the logistic model to predict the population growth from 2020 to 2050. Next, we build four models for evaluating efficiency, profitability, equity, and sustainability, respectively. We use differential equations, linear programming, and linear fitting to help find the solutions of our models. Then we propose an index called the Prioritized Food System Index ($PFSI$) to denote the emphasis of the food system. In the discussion part, we analyze the benefits and costs of re-optimizing the food system, conduct case studies on China, the USA, and Ethiopia, and discuss the scalability and adaptability of the model. Finally, we conduct a sensitivity analysis to show the influence of the policy motivation on the food system index and discuss strengths as well as possible improvements of our work.

\bibliographystyle{IEEEtran}
\bibliography{mybib}

% \newpage
\begin{appendices}
\section{Supporting Figures}
\begin{figure}[htbp]
    \centering
    \begin{minipage}[t]{0.48\textwidth}
        \centering
        \includegraphics[width=0.7\textwidth]{figure/model/USA/USA_profit.eps}
        \caption{Income of Food System of USA with Policy Interference.}
    \end{minipage}
    \begin{minipage}[t]{0.48\textwidth}
        \centering
        \includegraphics[width=0.7\textwidth]{figure/model/USA/USA_sustainability.eps}
        \caption{Greenhouse Gas Emission per yield of the Food System of USA with Policy Interference.}
    \end{minipage}
\end{figure}
\begin{figure}[htbp]
    \centering
    \begin{minipage}[t]{0.48\textwidth}
        \centering
        \includegraphics[width=0.7\textwidth]{figure/model/Ethiopia/Ethiopia_profit.eps}
        \caption{Income of Food System of Ethiopia with Policy Interference.}
    \end{minipage}
    \begin{minipage}[t]{0.48\textwidth}
        \centering
        \includegraphics[width=0.7\textwidth]{figure/model/Ethiopia/Ethiopia_sustainability.eps}
        \caption{Greenhouse Gas Emission per yield of the Food System of Ethiopia with Policy Interference.}
    \end{minipage}
\end{figure}
\begin{figure}[htbp]
    \centering
    \begin{minipage}[t]{0.48\textwidth}
        \centering
        \includegraphics[width=0.7\textwidth]{figure/model/USA/USA_GHG_fit.eps}
        \caption{Linear Fit of the Greenhouse Gas Emission of USA.}
    \end{minipage}
    \begin{minipage}[t]{0.48\textwidth}
        \centering
        \includegraphics[width=0.7\textwidth]{figure/model/USA/USA_yield_fit.eps}
        \caption{Linear Fit of the Annual Yield of USA.}
    \end{minipage}
\end{figure}
\begin{figure}[htbp]
    \centering
    \begin{minipage}[t]{0.48\textwidth}
        \centering
        \includegraphics[width=0.7\textwidth]{figure/model/China/China_GHG_fit.eps}
        \caption{Linear Fit of the Greenhouse Gas Emission of China.}
    \end{minipage}
    \begin{minipage}[t]{0.48\textwidth}
        \centering
        \includegraphics[width=0.7\textwidth]{figure/model/China/China_yield_fit.eps}
        \caption{Linear Fit of the Annual Yield of China.}
    \end{minipage}
\end{figure}
\begin{figure}
    \centering
    \includegraphics[width = 0.5\textwidth]{figure/model/Ethiopia/Ethiopia_pplt.eps}
    \caption{Logistic Fit of Population of Ethiopia.}
\end{figure}
% \section{Code Example}
% % \lstset{
% %     basicstyle = \sffamily,            % basic code style
% %     keywordstyle = \bfseries,          % keyword style
% %     commentstyle = \rmfamily\itshape,  % comment style, italic
% %     stringstyle = \ttfamily,           % string style
% %     flexiblecolumns,                   % don't ask why, just add this
% %     numbers = left,                    % line numbers on the left
% %     showspaces = false,                % showing spaces looks messy, so do not show them
% %     % numberstyle = \zihao{-5}\ttfamily, % line number style: small size, tt monospace font
% %     showstringspaces = false,
% %     captionpos = t,                    % position of the listing title; t means top
% %     frame = lrtb,                      % draw the frame
% % }
% \vspace{-2em}
% \lstset{frame=tb,
%     language=Python,
%     aboveskip=3mm,
%     belowskip=3mm,
%     showstringspaces=false,
%     columns=flexible,
%     basicstyle={\small\ttfamily},
%     numbers=none,
%     numberstyle=\tiny\color{gray},
%     keywordstyle=\color{blue},
%     % commentstyle=\color{dkgreen},
%     % stringstyle=\color{mauve},
%     breaklines=true,
%     breakatwhitespace=true,
%     tabsize=3
% }
% % \lstdefinestyle{Python}{
% %     language = Python,                 % language: Python
% %     basicstyle = \tt,
% %     % numberstyle = \zihao{-5}\ttfamily,
% %     keywordstyle = \color{blue},
% %     % keywordstyle = [2] \color{teal},
% %     stringstyle = \color{magenta},
% %     commentstyle = \color{red}\ttfamily,
% %     breaklines = true,                 % automatic line wrapping; avoid overly long lines
% %     columns = fixed,                   % without this the character spacing is not fixed and looks ugly; required
% %     basewidth = 0.5em,
% % }
% % \lstinputlisting[title = Grey Forecast]{code/GreyPre.py}
% \lstinputlisting[title = Moving Entropy Weight Method]{code/EWM.py}
% \lstinputlisting[title = Grey Forecast]{code/GreyPre.py}
\end{appendices}
\end{document}
\section{Background information}
Space travel is far more of a reality in the 21st century than it was a century ago. There is a growing need for knowledge of how motion beyond our Earth can be undertaken along safe routes with micrometer-scale precision. To make this possible, researchers have gathered data on the motions of heavenly bodies and on extraterrestrial environments. From these data, equations that describe the motion of those bodies with accuracy were derived. Using this knowledge, mankind has been able to send vehicles into space. This has prompted space exploration and, more significantly, space communication.

Space communication involves the satellites that have been sent into space in the endeavor to narrow the communication gap on the Earth below. This has brought about applications such as GPS and satellite communication (which includes the exchange of information through satellites and ground stations). In this project we aim to explore satellite-Earth physics by deriving equations that suitably describe this system and by building a simple model of it. We will draw conclusions on whether Kenya as a nation should venture into space by carrying out a cost assessment and considering other factors.

\section{Statement of the problem}
Kenya, like many other countries in Africa, has not been able to send a satellite into space despite being one of the leading nations in Africa. Kenya is a huge consumer of satellite technology owing to the number of corporations in this country that depend on it to run their activities, but this satellite technology is outsourced. What does Kenya lack that it is not able to send satellites into space? It is only recently that Kenya was able to send its first pico-satellite into space, in a joint venture with Japan carried out in collaboration with the University of Nairobi. This heralded a new dawn, with Kenya taking an interest in space.

We will look into this matter from an academic angle, examining the physics involved in sending a satellite into space and keeping it there. Due to the scope of this research and the time given, we will focus mainly on the physics of keeping a satellite in orbit. What are the equations involved in keeping a satellite in motion around the planet Earth? What is the cost, in monetary terms, involved in keeping a satellite in orbit? Is Kenya able to meet these requirements?

\section{Justification and Significance of the Study}
Kenya is on the verge of constituting a space agency whose prerogative will be the administration of sending satellites into space on behalf of Kenya. After many years of absence from the space scene, will Kenya be able to handle this leap onto new ground? In this research we will look into answering these questions. We will develop a system of equations that will be used to build simulations showing how well Kenya is prepared to launch into space. This study will also show what Kenya might expect to incur in terms of monetary costs in achieving the objective of sending a satellite into space, hence avoiding surprises when Kenya begins to dig deep into its pocket.

\section{Objectives}
\subsection{Main Objective}
To perform a computer simulation of an isolated Earth-satellite system based on two-body problem astrodynamics and to compare the simulated data with previously collected data in order to verify the correctness of the model.
\subsection{Specific Objectives}
1. Review the physics of the two-body problem.\\
2. Build a computer model of an Earth-satellite system in three dimensions.\\
3. Apply the model to the Kenyan satellite 1KUNS-PF.

\section{Methodology}
This research will involve deriving the equations of the two-body problem and simplifying them so that they can later be encapsulated in a single equation. This equation will be turned into a computer program that simulates the interaction between the Earth and the satellites that move around it. The simulation will be done in three dimensions using Python, as it is rich in scientific libraries that enable such simulations. We will then proceed to determine the accuracy of the model.
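As a first illustration of the intended approach, the following minimal Python sketch integrates the two-body equation of motion $\ddot{\mathbf{r}} = -\mu\,\mathbf{r}/r^{3}$ for a satellite around the Earth. It is only a sketch of the method described above; the initial conditions are illustrative (a near-circular orbit at roughly 400 km altitude) and are not those of 1KUNS-PF.

\begin{verbatim}
import numpy as np
from scipy.integrate import solve_ivp

MU_EARTH = 398600.4418  # Earth's gravitational parameter [km^3/s^2]

def two_body(t, state):
    """Two-body equations of motion, state = [x, y, z, vx, vy, vz]."""
    r = state[:3]
    accel = -MU_EARTH * r / np.linalg.norm(r) ** 3
    return np.concatenate((state[3:], accel))

# Illustrative initial conditions: near-circular orbit at ~400 km altitude
r0 = np.array([6778.0, 0.0, 0.0])                      # position [km]
v0 = np.array([0.0, np.sqrt(MU_EARTH / 6778.0), 0.0])  # velocity [km/s]

sol = solve_ivp(two_body, (0.0, 6000.0), np.concatenate((r0, v0)),
                rtol=1e-9, atol=1e-9)
print(sol.y[:3, -1])  # satellite position after 6000 s of propagation
\end{verbatim}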
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{REFERENCES, QUOTATIONS AND FOOTNOTES}\label{Ch4}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

In this section, information is given about how citations, quotations and footnotes should be formatted.

\section{Citing (indication of references in main text body)}
\subsection{Citing according to surname of author}
References are cited with the surname of the author and the year. In the references section, the references are listed alphabetically according to the surname of the author. Citing of a reference at the beginning of or within a sentence must be as Boran (2003), whereas a citation at the end of a sentence must be as (Boran, 2003). The full stop is placed directly after the citation.

A reference with two authors must be cited as Yılmaz and Johnson (2004) at the beginning of or within a sentence, or as (Yılmaz and Johnson, 2004) at the end of a sentence. A reference with more than two authors must be cited as Yılmaz et al. (2004) at the beginning of or within a sentence, or as (Yılmaz et al., 2004) at the end of a sentence. Different publications of an author published in the same year must be cited as Feray (2005a), Feray (2005b).

While citing a part of a publication, the number of the page the cited material (chapter, table, figure, or equation) is on must be indicated. While citing, the expression “page” must be abbreviated, but “chapter” must not. For example: (Centers for Disease Control and Prevention, 2005, p. 10), (Shimamura, 1989, Chapter 3).

Citing multiple publications in one pair of brackets: (Berndt, 2002; Harlow, 1983). Citing personal communication in the main text body: (V.–G. Nguyen, personal communication, September 28, 1998), (J. Smith, personal communication, August 15, 2009). In the references section, reference tags must be listed according to the surname of the author.

For citing secondary references (in case the reference cites another reference), the secondary reference must be cited in brackets. In the references section, the reference tag is organized according to the secondary reference; the original reference must not be used as a tag. For example: In his e-mails, Smith argued that asynchronous line dancing would be the next Internet meme (as cited in Jones, 2010).

\subsection{Citing according to order of appearance}
References are cited by numbering, with the number indicated in square brackets ([]) in the main text body. The first reference cited in a thesis is numbered [1] and the following references are numbered according to their order of appearance. In the main text body, references must be cited as specified below:
\vspace*{-12pt}
\begin{tabbing}
\hspace*{1.5cm}\= \kill
[1] \> Reference no. 1\\
[1--3] \> References from no. 1 to 3 (thus, references 1, 2 and 3)\\
[1,3] \> References no. 1 and 3\\
[1,3,8] \> References no. 1, 3 and 8\\
[1,3--8] \> References no. 1, and from no. 3 to 8 (thus, references 1, 3, 4, 5, 6, 7 and 8)
\end{tabbing}
\vspace*{-12pt}
Different volumes of a reference must be cited and numbered individually.

\section{Quoting}
Generally, quoting is done by remaining faithful to the original text in terms of words, spelling and punctuation. In case there is a mistake, the correct version is written in square brackets in the quoted text. Short quotations (not longer than 40 words) must be given in quotation marks. Following the quoted text, the reference must be written and a full stop must be placed afterwards.
Quotations longer than 40 words must not be shown in quotation marks. Instead, they must be indented 1 tab space (1.27 cm) from the left side of the page. The font size for long quotations indented from the left must be 2 pt smaller than the font size used in the main text body. However, it is not advised to quote very long texts or to quote very frequently. Unlike short quotations, references of long quotations must be placed after the full stop (i.e., . (p. 196)).

Example for a quotation at the beginning of a sentence: According to Jones (1998), "Students often had difficulty using APA style, especially when it was their first time" (p. 199).

Example for a quotation in the middle of a sentence: Interpreting these results, Robbins et al. (2003) suggested that the “therapists in dropout cases may have inadvertently validated parental negativity about the adolescent without adequately responding to the adolescent’s needs or concerns” (p. 541) contributing to an overall climate of negativity.

Example for a quotation at the end of a sentence: Confusing this issue is the overlapping nature of roles in palliative care, whereby “medical needs are met by those in the medical disciplines; nonmedical needs may be addressed by anyone on the team” (Csikai \& Chaitin, 2006, p. 112).

Detailed information on quoting can be found on the websites of the Graduate Schools and the associated links.

\section{Footnotes}
Footnotes can be used in theses to add content-expanding, content-enhancing, or additional information. Footnote numbers must be placed directly after a quotation. In case the quotation is a paragraph, the footnote number must be placed directly after the last word of the paragraph (as a superscript). In case the quotation is a concept or a noun, the footnote number must be placed directly after that concept or noun (as a superscript).

Footnote numbers in the main text body must be indicated as superscripts, as shown\footnotemark. A punctuation mark must not be placed after the number. Footnotes must be written with a font size 2 pt smaller than the main text body font size. 1 space must be set between the footnote line and the footnote number, and 1/2 space must be set between the footnote number and the first line of the footnote. Footnotes must be separated from the main text body with a thin horizontal line. Detailed information on footnotes can be found on the websites of the Graduate Schools and the associated links.

\footnotetext{~Reference display cannot be done with footnotes.~Footnotes can be used in theses to add content-expanding, content-enhancing, or additional information.~If such information must include references, these references must be indicated in the References section.}

\section{Second Level Title: First Letters Capital}
Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea.

\subsection{Third level title: Only first letter capital}
Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea.

\subsubsection{Fourth level title: Only first letter capital}
Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut lab ore sit et dolore magna.
\subsubsubsection{Fifth level title: No numbering after fourth level titles}
Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut lab ore sit et dolore magna\footnotemark.
% Include a tilde to provide one letter spacing between the footnote number and the text at the bottom - SBÖ
\footnotetext{~~Footnotes must be written with a font size 2 pt smaller than the main text body font size.}

\begin{figure}[t]
\centering
\includegraphics[width=230pt,keepaspectratio=true]{./fig/sekil6}
% sekil6.eps: 0x0 pixel, 300dpi, 0.00x0.00 cm, bb=14 14 555 489
\caption{Example figure.}
\label{Figure4.1}
\end{figure}

This indicates that the ANN is accurate at base flow and flow height values lower than 3 m.

\begin{table*}[h]
{\setlength{\tabcolsep}{14pt}
\caption{Example table.}
\begin{center}
\vspace{-6mm}
\begin{tabular}{cccc}
\hline \\[-2.45ex] \hline \\[-2.1ex]
Column A & Column B & Column C & Column D \\
\hline \\[-1.8ex]
Row A & Row A & Row A & Row A \\
Row B & Row B & Row B & Row B \\
Row C & Row C & Row C & Row C \\ [-0ex]
\hline
\end{tabular}
\vspace{-6mm}
\end{center}
\label{Table4.1}}
\end{table*}

Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut lab ore sit et dolore magna. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut lab ore sit et dolore magna. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut lab ore sit et dolore magna.
\chapter{E2E Evaluation Metrics}\label{ch:metrics}
\section{Audio Metrics}
\subsection{SNR - Signal to Noise Ratio}
The signal-to-noise ratio (SNR) metric evaluates how distinct the desired signal is from the overall noise. Let \(y(t)\) denote a mixed time-domain signal consisting of the desired speech signal and some interferences referred to as noise. The mixture is given by:
\begin{align}
    y(t) = x(t) + n(t)
\end{align}
Where \(x(t)\) and \(n(t)\) denote the speech signal and the interference noise. Ideal speech separation of the mixture is characterized by a perfect match between the predicted speech signal, \(\widehat{x}(t)\), and the original (reference) speech signal \(x(t)\).
Such a problem can be modeled and optimized by the mean square error (MSE, or L2) loss function as follows:
\begin{align}
    \ell(\widehat{x}, x) & = \sum_{t=0}^{T-1} \left[\widehat{x}(t) - x(t)\right]^{2} \\
                         & = \sum_{t=0}^{T-1} |r(t)|^{2}
\end{align}
The term \(\sum_{t} |r(t)|^{2}\) is the total energy of the residual error between the predicted signal and the target speech, which translates to additive noise. First, let us break \(\widehat{x}(t)\) into its fundamental components\cite{1643671}.
\begin{align}
    \widehat{x}(t) & = x_{_{s}} + e_{_{noise}} + e_{_{interf}} + e_{_{artif}}
\end{align}
Where \(x_{_{s}}\) stands for the part of \(\widehat{x}(t)\) coming from the wanted source(s), and \(e_{_{noise}}\) represents the part of \(\widehat{x}(t)\) coming from the sensor's noise. The sensor can be the microphone itself or one of its counterparts. \(e_{_{interf}}\) denotes the unwanted sources present in \(\widehat{x}(t)\), and \(e_{_{artif}}\) represents any other artifacts that cause distortions in the prediction of \(x_{_{s}}\).
According to Parseval's theorem, the residual energy in time equals the sum, over the frequency domain, of the squared difference between the predicted magnitude and the magnitude of the target speech\cite{1643671009}.
\begin{align}
    \sum_{t} |r(t)|^{2} & = \frac{1}{T}\sum_{\tau=0}^{T-1}\sum_{f=0}^{T-1} \left[ \widehat{X}(\tau, f) - X(\tau, f)\right]^{2}
\end{align}
Since the residual energy is interpreted as the noise, minimizing the residual, i.e., minimizing the MSE loss function, translates into an increase in SNR. The SNR is therefore given by:
\begin{align}\label{eq:snr_equation}
    SNR & = 10\log_{10} \left( \frac{ \| x_{_{s}}\|^{2}}{\|\widehat{x} - x_{_{s}} \|^{2}} \right) \nonumber \\
        & = 10\log_{10} \left( \frac{ \| x_{_{s}} \|^{2}}{\| r \|^{2}} \right)
\end{align}

\subsection{SI-SNR --- Scale Invariant SNR}
To make the SNR insensitive to scaling\cite{roux2018sdr}, both the target and estimated signals are normalized to \underline{zero mean}.
\begin{align}
    SI-SNR & = 10\log_{10} \left( \frac{\left\| x_{_{s}} - \mathbf{E}[x_{_{s}}]\right\|^{2}} {\left\| (\widehat{x} - \mathbf{E}[\widehat{x}]) - (x_{_{s}} - \mathbf{E}[x_{_{s}}]) \right\|^{2}} \right) \nonumber \\
           & = 10\log_{10} \left( \frac{ \| x_{_{AC}}\|^{2}}{\|\widehat{x}_{_{AC}} - x_{_{AC}}\|^{2}} \right) \nonumber \\
           & = 10\log_{10} \left( \frac{ \| x_{_{AC}}\|^{2}}{\| r_{_{AC}} \|^{2}} \right)
\end{align}
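The SI-SNR expression above maps directly to a few lines of NumPy. The following sketch follows the zero-mean formulation given in this section and is illustrative only; it assumes time-aligned, equal-length, one-dimensional reference and estimate arrays.

\begin{verbatim}
import numpy as np

def si_snr(reference, estimate, eps=1e-8):
    """Scale-invariant SNR in dB, following the zero-mean formulation above."""
    x = reference - np.mean(reference)   # zero-mean target
    y = estimate - np.mean(estimate)     # zero-mean estimate
    r = y - x                            # residual error (treated as noise)
    return 10.0 * np.log10((np.sum(x**2) + eps) / (np.sum(r**2) + eps))

# Toy usage: a clean sinusoid and a slightly noisy estimate of it
t = np.linspace(0.0, 1.0, 16000)
clean = np.sin(2.0 * np.pi * 440.0 * t)
noisy = clean + 0.05 * np.random.randn(t.size)
print(si_snr(clean, noisy))
\end{verbatim}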
\subsection{Segmental SNR}
An SNR evaluation is basically the ratio between the overall energy of the signal and that of the noise. However, some portions of the signal are almost pure noise, especially in the case of speech signals, where there are gaps between phonemes, articulation stops, and air aspiration breaks. As a result, the SNR value may be affected, and it actually depends on the length of the silent sections relative to the length of the sections where speech is present.

With Segmental SNR\cite{10.5555/912256}, instead of taking the entire signal, the signal is divided into relatively small chunks (segments), usually \(25\,ms\) long, optionally with an overlap between segments. The SNR of each segment is then computed, and the per-segment values are finally averaged. If the energy of the speech reference in a segment is negligible, that segment is excluded, thus limiting the evaluation to sections where speech is present. Equation~\ref{eq:snr_equation} can be extended to:
\begin{align}
    SEG-SNR & = \frac{1}{M}\sum_{m=1}^{M} 10\log_{10} \left( \frac{ \| x_{_{s}} \|^{2}_{(m)}}{\| r \|^{2}_{(m)}} \right)
\end{align}
Where \(M\) denotes the number of segments the signal is divided into.

Despite being more accurate for speech signals, Segmental SNR suffers from a limitation that can affect the results severely. In speech enhancement evaluations, the predicted (enhanced) version of the signal is compared to a clean reference signal with respect to the noisy mixture. Unfortunately, the speech analysis performed for the extraction of the Segmental SNR causes misalignments between the reconstructed signal and the clean reference. Moreover, the reconstructed signal is not aligned with the noisy mixture either. These misalignments are a side effect of the time-domain to frequency-domain transformation, of the processing applied to the transformed signal, and of the reconstruction of the time-domain signal using the inverse transform. Therefore, without alignment, the Segmental SNR is meaningless and most probably inaccurate. Due to that limitation, an alignment process should be applied before the Segmental SNR is computed. These alignments usually leave a small residual error that spans a few sampling points.

\subsection{STOI --- Short-Time Objective Intelligibility}
STOI\cite{5495701} is a metric used to evaluate the intelligibility of a speech signal. The intelligibility is measured by taking the correlation coefficient between the temporal envelopes of the clean and degraded speech. In our case, the term degraded might be confusing, since the degraded speech input is actually the outcome of the beamformer following the T-F masking at the front-end. However, relative to the clean speech, the beamformer's output is indeed degraded, although it is considered an enhanced version of the noisy mixture. The naming convention \emph{Short-Time} comes from the time frame length of the overlapping segments, which is \(384\,ms\).
\begin{figure}[H]
    \centering
    \includegraphics[width=\linewidth]{Features/images/stoi_blocks_diagram}
    \caption{STOI flow diagram}\label{fig:stoi_blocks_diagram}
    \source{Adapted from \cite{5495701}}
\end{figure}
The STOI algorithm structure is demonstrated in the block diagram shown in Figure\;\ref{fig:stoi_blocks_diagram}. The short-time temporal envelope of the degraded speech \(Y_{_{j,m}}\) is clipped and normalized before the extraction of the correlation coefficient with the short-time temporal envelope of the clean speech \(X_{_{j,m}}\).
The clipped, normalized version can then be written as:
\begin{align}
    \mathcal{Y}[n] & = \min\Bigg\{ \frac{||X_{_{j,m}}||}{||Y_{_{j,m}}||} Y_{_{j,m}}[n] ,\; (1+10^{\sfrac{-\beta}{20}})X_{_{j,m}}[n] \Bigg\}
\end{align}
Thus, the correlation coefficient can be written as the distance given in Equation\;\ref{eq:stoi_correl}.
\begin{align}\label{eq:stoi_correl}
    d_{_{j,m}} & = \frac{ (X_{_{j,m}}-\bar{X}_{_{j,m}})^{tr} \cdot (\mathcal{Y}_{_{j,m}}-\bar{\mathcal{Y}}_{_{j,m}}) } { ||X_{_{j,m}}-\bar{X}_{_{j,m}}|| \cdot ||\mathcal{Y}_{_{j,m}}-\bar{\mathcal{Y}}_{_{j,m}}|| }
\end{align}
Equation\;\ref{eq:stoi_correl} defines the intermediate intelligibility measure for the \(m^{th}\) time frame and the \(j^{th}\) band. Extending it to a definition for the entire signal, we take the average of \(d_{_{j,m}}\) as in Equation\;\ref{eq:stop_dist_avg}.
\begin{align}\label{eq:stop_dist_avg}
    d & = \frac{1}{JM} \sum_{j,m} d_{_{j,m}}
\end{align}
Where \(J\) denotes the total number of one-third octave bands and the average runs over \(M\) time frames.

\subsection{PESQ --- Perceptual Evaluation of Speech Quality}
PESQ\cite{941023} is a measuring method adopted by the ITU (International Telecommunication Union) to test the speech quality of telephony and mobile stations. The metric evolved from earlier measuring techniques such as the Bark Spectral Distortion (BSD), the Perceptual Analysis Measurement System (PAMS), and the Perceptual Speech Quality Measure (PSQM). The motivation behind the development of PESQ was the need to assess the speech quality of an E2E communication channel, considering the entire link rather than particular parts of it. The evaluation of speech signal quality by PESQ follows the MOS (Mean Opinion Score) model, where the actual speech quality is ranked between 1 and 5 by a group of listeners. Figure \ref{fig:pesq_blocks_diagram} shows the data flow of the PESQ computation for a predicted signal with respect to the clean reference.
\begin{figure}[H]
    \centering
    \includegraphics[width=0.85\linewidth]{Features/images/pesq_blocks_diagram_new}
    \caption{PESQ Algorithm Blocks Diagram}\label{fig:pesq_blocks_diagram}
    \source{Adapted from PESQ paper\cite{941023} and redesigned}
\end{figure}

\section{ASR Metrics}
\subsection{WER - Word Error Rate}
The WER\cite{KLAKOW200219} metric is probably the most widely used evaluation technique for speech recognition systems. The metric is evaluated at the ASR engine's output, where the predicted text is segmented into sentences. Each word in the predicted text is then matched with its counterpart in the annotated reference transcript. The number of mismatches between a predicted sentence and the reference, divided by the total number of words in the reference, gives the WER. However, in some cases the predicted sentences differ in length from the reference. Therefore, \emph{Insertions} and \emph{Deletions} must be handled as well, without neglecting the detected \emph{Substitutions}. The WER is described by:
\begin{align}
    WER & = \frac{S + D + I}{N}
\end{align}
Where \(N\) is the total count of words in the reference, and \(S, D, I\) are the numbers of \emph{Substitutions} (wrongly recognized words), \emph{Deletions} (omitted words), and \emph{Insertions} (wrongly inserted words).
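As an illustration, WER can be computed with the standard Levenshtein (edit-distance) recursion over words. The sketch below is a minimal reference implementation, not the implementation used by any particular ASR toolkit; applying the same function to character sequences instead of word lists yields the CER discussed next.

\begin{verbatim}
import numpy as np

def wer(reference, hypothesis):
    """Word error rate via Levenshtein distance over whitespace-split words."""
    ref, hyp = reference.split(), hypothesis.split()
    # d[i, j] = minimum number of edits turning ref[:i] into hyp[:j]
    d = np.zeros((len(ref) + 1, len(hyp) + 1), dtype=int)
    d[:, 0] = np.arange(len(ref) + 1)    # deletions
    d[0, :] = np.arange(len(hyp) + 1)    # insertions
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            sub = d[i - 1, j - 1] + (ref[i - 1] != hyp[j - 1])
            d[i, j] = min(sub, d[i - 1, j] + 1, d[i, j - 1] + 1)
    return d[len(ref), len(hyp)] / max(len(ref), 1)

print(wer("the cat sat on the mat", "the cat sit on mat"))  # 2 errors / 6 words
\end{verbatim}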
\subsection{CER - Character Error Rate}
CER is another metric with some similarities to WER but with a finer resolution, since characters rather than words are compared. The same rules for \emph{Substitutions}, \emph{Deletions}, and \emph{Insertions} apply, and therefore the calculation of the CER is the same:
\begin{align}
    CER & = \frac{S + D + I}{N}
\end{align}
In many cases, CER\cite{_isword} is a complementary metric to WER. This extra measure is especially useful when a finer differentiation of the \emph{Substitutions} within the predicted transcript (also known as the hypothesis) is needed. While WER counts a full word mismatch between the reference and the prediction even when only single characters or punctuation marks are misplaced, CER leads to a more fine-grained grading per word.

\section{HW Metrics}
% \subsection{Computation time}
% \subsection{Utilization Ratio}
\subsection{Power Estimation}
Electrical circuits, components, and systems require power to function. The amount of power a device consumes from the power sources depends on various parameters and mainly describes the rate of energy delivery from the source to the device or vice versa. Due to the nature of conducting materials, whenever an electrical potential is applied between a conductor's terminals, an electrical current flows through the conductor. The current that flows in the system feeds the different components with energy. However, the total supplied energy is not purely consumed over time; some energy is lost and wasted due to power dissipation.

Power dissipation is a side effect of a conductor's resistive nature, which ``resists'' the flow of current through it. As a result, part of the energy in the system is converted to heat. Since dissipated power is wasted energy, it is also considered one of the main causes of performance degradation in electronic systems at high temperatures. Therefore, engineers want to mitigate as much as possible any dissipated power that is not used for the main functionality of the system. To that end, power analysis is crucial in any system design phase to ensure efficiency and correctness while maintaining robustness over time and under different working conditions.

The power dissipation of an electrical circuit depends on many parameters. In general, however, it can be modeled accurately according to three mechanisms divided into two main groups:
\begin{enumerate}
    \item Static Power
          \begin{itemize}
              \item Intrinsic Leakage Power
          \end{itemize}
    \item Dynamic Power
          \begin{itemize}
              \item Internal Power
              \item Switching Power
          \end{itemize}
\end{enumerate}

\subsubsection{Intrinsic Leakage Power}
Leakage power is the power that dissipates due to the structure of a CMOS device, where a thin layer of metal oxide isolates the semiconductor material from the gate metal, thus forming a capacitor. Leakage power dissipates statically regardless of the CMOS device's state, whether it is active or off (idle).
\begin{figure}[H]
    \centering
    \includegraphics[width=0.75\linewidth]{Features/images/leak_power_schem}
    \caption{Leakage Power Illustration}\label{fig:leak_power_schem}
    \source{Adapted from Synopsys PrimePower Suit documentation\cite{lowpowerSoc}}
\end{figure}
Figure \ref{fig:leak_power_schem} illustrates three leakage currents: the reverse-bias current of the p-n junction diode, the sub-threshold leakage current, and the gate leakage.
With recent advances in process technology, CMOS devices have shrunk in size, but leakage power has increased as a side effect.
\begin{figure}[H]
    \centering
    \includegraphics[width=0.55\linewidth]{Features/images/leak_vs_nm}
    \caption{Leakage Power vs. Process Technology}\label{fig:leak_vs_nm}
    \source{Adapted from Soitec FinFet presentation\cite{processnodeleak}}
\end{figure}
The overall leakage power is a function of the number of voltage sources and their voltage levels, the physical dimensions of the CMOS device, and the threshold voltage set to switch between the on and off states.
\begin{align}
    P_{leak} & = \mathcal{F} (V_{_{DD}}, V_{_{th}}, \frac{\mu_{n}\varepsilon_{ox} W}{L})
\end{align}

\subsubsection{Internal Power}
A CMOS device is formed of two complementary MOS transistors, a p-type and an n-type, arranged as a symmetrical pair. Internal power dissipation happens due to this structure: whenever a transition at the CMOS gate occurs, both the NMOS and the PMOS transistors conduct for a relatively short duration of time. As a result, a short circuit is formed directly from the power rail to ground. Although each such event is brief, the internal power dissipated in highly toggled designs becomes significant over time. To minimize the internal dissipated power, in other words, to minimize the time during which both devices conduct and current flows from Vdd to GND, the transition times (both rising and falling) are set to be very fast.
\begin{figure}[H]
    \centering
    \includegraphics[width=0.75\linewidth]{Features/images/int_power_schem}
    \caption{Internal Power Illustration}\label{fig:int_power_schem}
    \source{Adapted from Synopsys PrimePower Suit documentation\cite{lowpowerSoc}}
\end{figure}

\subsubsection{Switching Power}
Switching power is the power dissipated as a result of charging and discharging loads during transitions. MOS devices introduce capacitance at their input gates due to their structure. Thus, whenever a low-to-high transition occurs at the output, the driver pushes current to charge the capacitive load in order to set the desired logic-level voltage. Likewise, for a high-to-low transition at the output, the load capacitance discharges through the NMOS transistor to ground. As a result, the charging and discharging currents are eventually dissipated and are not delivered to the external load.
\begin{figure}[H]
    \centering
    \includegraphics[width=0.75\linewidth]{Features/images/sw_power_schem}
    \caption{Switching Power Illustration}\label{fig:sw_power_schem}
    \source{Adapted from Synopsys PrimePower Suit documentation\cite{lowpowerSoc}}
\end{figure}
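For reference, the switching component is commonly approximated by the standard first-order CMOS model (stated here as a well-known approximation, not derived in this work), where \(\alpha\) is the switching activity factor, \(C_{L}\) the switched load capacitance, \(V_{DD}\) the supply voltage, and \(f_{clk}\) the clock frequency:
\begin{align}
    P_{switch} & \approx \alpha \, C_{L} \, V_{DD}^{2} \, f_{clk}
\end{align}
Because the supply voltage enters quadratically, voltage scaling is usually the most effective lever for reducing the dynamic power of a design.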
% --------------------------------------------------- % Date: 12.12.2018 % Version: v0.1 % Autor: Felix Faltin <ffaltin91[at]gmail.com> % Repository: https://github.com/faltfe/iodhbwm % --------------------------------------------------- % --- --- --- --- -- Class options -- --- --- --- --- % --------------------------------------------------- \documentclass[ load-dhbw-templates, % Allow \dhbw* commands add-tocs-to-toc, % Add LoF, LoT, etc. to ToC add-bibliography, % Include bibliography (needs biber run) bib-file = biblatex-examples.bib, % Set bibliography file auto-intro-pages = all, % Takes care about titlepage, abstract, ToC, etc. language = english, % Provide another language for the abstract language = ngerman, % Set main document language debug % Provide \lipsum, \blindtext ]{iodhbwm} \usepackage[T1]{fontenc} % --------------------------------------------------- % --- --- --- --- - Necessary setup - --- --- --- --- % --------------------------------------------------- \dhbwsetup{% abstract = my-abstract.inc, % Include custom abstract file author = Max Mustermann, thesis type = BA, thesis title = Überprüfung von Bausteinen, student id = 1337, location = Transsilvanien, institute = Lebkuchenhaus, institute logo = example-image-a, course/id = Txxxx, supervisor = Schneewittchen, processing period = {01.01.17 -- 31.01.17}, reviewer = Frau Holle und die sieben Zwerge, course/name = {Trolltechnik}, bachelor degree type = {Troll of the universe}, % bachelor degree = BoS % Bachelor of Science } % --------------------------------------------------- % --- --- --- --- Begin actual content -- --- --- --- % --------------------------------------------------- \begin{document} \Blinddocument \chapter{Tabellen} \blindtext \begin{table}[htb] \centering \begin{tabular}{@{}ll@{}} \toprule Linke Spalte & Rechte Spalte\\\midrule \dots & \dots\\ &\\ &\\\bottomrule \end{tabular} \caption{Leere Tabelle} \end{table} \blindtext \chapter{Abbildungen} \blindtext \begin{figure}[htb] \centering \includegraphics[width=.5\linewidth]{example-image-a} \caption{Random image} \end{figure} \blindtext % Cite all elements from the passed file % This is necessary because otherwise there % won't be any bibliography generated. \nocite{*} \end{document}
\documentclass[10pt]{article}
% \usepackage{amsmath}
\usepackage{listings}
\usepackage{url}
\setlength{\parindent}{0pt}
\usepackage[parfill]{parskip}
% \usepackage{fullpage}
\usepackage[margin=1.5in]{geometry}
\usepackage[
pdfpagemode=UseOutlines,
pdfdisplaydoctitle=true,
colorlinks=true,
linkcolor=black,
filecolor=black,
pagecolor=black,
urlcolor=black,
frenchlinks=true
]{hyperref}
\usepackage{tocloft}% http://ctan.org/pkg/tocloft
\setlength{\cftsubsecnumwidth}{3.5em}% Set length of number width in ToC for \subsection

\title{pyOpenMS 2.7}
\author{OpenMS Development Team}
\date{}

\begin{document}
\maketitle

% TODO update numbers from time to time
pyOpenMS is a set of Python bindings of the C++ OpenMS library. It provides access to a large number of objects (350+) and functions (3900+) of the C++ code directly from Python. The main functions of the library are explained in the first section of this manual. A list of all wrapped functions can be found in the appendix of this manual.

Since all Python functions call C++ directly and their signatures usually correspond to the C++ ones, the OpenMS documentation is in most cases the most complete and up-to-date reference and applies directly to pyOpenMS. In this manual, only differences with respect to the existing documentation are highlighted and some general use cases are explained. The link to the documentation of the latest release can be found here: \url{http://open-ms.sourceforge.net/documentation/}.

\tableofcontents
\pagebreak

The following section will explain the most important functions of pyOpenMS in more detail with full examples of Python code that can be directly executed.

\section{File Input/Output}
pyOpenMS supports file input and output for various formats. These formats include \texttt{DTA2D, DTA, EDTA, FeatureXML, Kroenik, MzData, MzIdentML, IdXML, mzML, mzXML, PepXML, ProtXML, TraML, XTandemXML}.

\subsection{Common Pattern}
Most file format objects follow this idiom:

\begin{verbatim}
from pyopenms import *
file = FileObject()
exp = MSExperiment()
file.load(filename_in, exp)
# process data
file.store(filename_out, exp)
\end{verbatim}

where \texttt{FileObject} can be any of \texttt{DTA2DFile, DTAFile, MzXMLFile, MzMLFile \ldots } (note that the above code will not work directly since you will need to use a specific file implementation, see below). For \texttt{EDTAFile}, you must load and store a \texttt{ConsensusMap} instead of an \texttt{MSExperiment}. For \texttt{FeatureXMLFile}, you must load and store a \texttt{FeatureMap} instead of an \texttt{MSExperiment}.

\subsection{IdXML}
\begin{verbatim}
from pyopenms import *
id_file = IdXMLFile()
filename_in = "input.IdXML"
filename_out = "output.IdXML"
protein_ids = []
peptide_ids = []
id_file.load(filename_in, protein_ids, peptide_ids)
# process
id_file.store(filename_out, protein_ids, peptide_ids)
\end{verbatim}
see also Section~\ref{IdXMLFile}.

\subsection{PepXML}
\begin{verbatim}
from pyopenms import *
id_file = PepXMLFile()
filename_in = "input.pep.xml"
filename_out = "output.pep.xml"
protein_ids = []
peptide_ids = []
id_file.load(filename_in, protein_ids, peptide_ids)
# process
id_file.store(filename_out, protein_ids, peptide_ids)
\end{verbatim}
\texttt{PepXML} also supports loading with an additional parameter specifying an MSExperiment which contains the retention times corresponding to the peptide hits (since these may not be stored in a pep.xml file). see also Section~\ref{PepXMLFile}.
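The \texttt{\# process} placeholder in the identification examples above can, for example, iterate over the loaded peptide identifications and inspect the individual hits. The following short sketch is illustrative only and assumes the standard accessors \texttt{getHits}, \texttt{getScore} and \texttt{getSequence} provided by the identification classes:

\begin{verbatim}
from pyopenms import *

protein_ids = []
peptide_ids = []
IdXMLFile().load("input.IdXML", protein_ids, peptide_ids)

for peptide_id in peptide_ids:
    for hit in peptide_id.getHits():
        print(hit.getSequence().toString(), hit.getScore())
\end{verbatim}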
\subsection{ProtXML}

\begin{verbatim}
from pyopenms import *
id_file = ProtXMLFile()
filename_in = "input.prot.xml"
protein_id = ProteinIdentification()
peptide_id = PeptideIdentification()
id_file.load(filename_in, protein_id, peptide_id)
# process
# storing not supported
\end{verbatim}

\texttt{ProtXML} currently only supports loading of data.

\subsection{MzIdentML}

\begin{verbatim}
from pyopenms import *
id_file = MzIdentMLFile()
filename_in = "input.mzid"
filename_out = "output.mzid"
identification = Identification()
id_file.load(filename_in, identification)
# process
id_file.store(filename_out, identification)
\end{verbatim}

Alternatively, MzIdentMLFile also provides a function to load (but not store) data equivalent to IdXML using two empty vectors that will be filled with \texttt{ProteinIdentification} and \texttt{PeptideIdentification} objects.

\subsection{TraML}

\begin{verbatim}
from pyopenms import *
tramlfile = TraMLFile()
filename_in = "input.TraML"
filename_out = "output.TraML"
targeted_exp = TargetedExperiment()
tramlfile.load(filename_in, targeted_exp)
# process
tramlfile.store(filename_out, targeted_exp)
\end{verbatim}

See also Section~\ref{TraMLFile}.

\subsection{MzML}

\begin{verbatim}
from pyopenms import *
file = MzMLFile()
exp = MSExperiment()
filename_in = "input.mzML"
filename_out = "output.mzML"
file.load(filename_in, exp)
# process
file.store(filename_out, exp)
\end{verbatim}

See also Section~\ref{MzMLFile}.

\pagebreak

\section{Parameter Handling}

Parameter handling in OpenMS and pyOpenMS is usually implemented through inheritance from \texttt{DefaultParamHandler}, which allows access to parameters through the \texttt{Param} object. This means the classes implement the methods \texttt{getDefaults}, \texttt{getParameters} and \texttt{setParameters}, which give access to the default parameters and the current parameters and allow setting new parameters. The \texttt{Param} object that is returned can be manipulated through the \texttt{setValue} and \texttt{getValue} methods (the \texttt{exists} method can be used to check for the existence of a key). Using the \texttt{getDescription} method, it is possible to get a help-text for each parameter value in an interactive session without consulting the documentation.

\section{Signal Processing and Filters}

Most signal processing algorithms follow a similar pattern in OpenMS.

\begin{verbatim}
filter = FilterObject()
exp = MSExperiment()
# populate exp
filter.filterExperiment(exp)
\end{verbatim}

Since they work on a single \texttt{MSExperiment} object, little input is needed to execute a filter directly on the data. Examples of filters that follow this pattern are \texttt{GaussFilter, SavitzkyGolayFilter} as well as the spectral filters \texttt{BernNorm, MarkerMower, NLargest, Normalizer, ParentPeakMower, Scaler, SpectraMerger, SqrtMower, ThresholdMower, WindowMower}.

There are multiple ways to access the raw data from mzML files:

\begin{verbatim}
from pyopenms import *
file = MzMLFile()
exp = MSExperiment()
filename_in = "input.mzML"
file.load(filename_in, exp)

for spec in exp:
    for peak in spec:
        print(peak)

# alternatively
for spec in exp:
    peaks = spec.get_peaks()
    # process peaks
\end{verbatim}

\section{Complex algorithmic tools}

More complex algorithmic tools require a short explanation and usage:

\subsection{Centroided FeatureFinder}

The FeatureFinder for centroided data is called \texttt{FeatureFinderAlgorithmPicked} in OpenMS.
\begin{verbatim} # set input_path and out_path seeds = FeatureMap() fh = MzMLFile() options = PeakFileOptions() options.setMSLevels([1,1]) fh.setOptions(options) input_map = MSExperiment() fh.load(input_path, input_map) input_map.updateRanges() ff = FeatureFinder() ff.setLogType(LogType.CMD) # Run the feature finder features = FeatureMap() name = FeatureFinderAlgorithmPicked.getProductName() params = FeatureFinder().getParameters(name) ff.run(name, input_map, features, params, seeds) features.setUniqueIds() fh = FeatureXMLFile() fh.store(out_path, features) \end{verbatim} \subsection{OpenSwathAnalyzer} The OpenSwathAnalyzer calls internally an object called MRMFeatureFinderScoring (since it does feature finding based on a scoring approach). It takes as input the chromatograms and the targeted library (transition library) in TraML format. Furthermore, it also takes a transformation description and a Swath file as optional arguments. \begin{verbatim} # load chromatograms chromatograms = pyopenms.MSExperiment() fh = pyopenms.FileHandler() fh.loadExperiment("infile.mzML", chromatograms) # load TraML file targeted = pyopenms.TargetedExperiment(); tramlfile = pyopenms.TraMLFile(); tramlfile.load("tramlfile.TraML", targeted); # Create empty files as input and finally as output empty_swath = pyopenms.MSExperiment() trafo = pyopenms.TransformationDescription() output = pyopenms.FeatureMap(); # set up OpenSwath analyzer (featurefinder) and run featurefinder = pyopenms.MRMFeatureFinderScoring() featurefinder.pickExperiment(chromatograms, output, targeted, trafo, empty_swath) # Store outfile featurexml = pyopenms.FeatureXMLFile() featurexml.store("outfile.featureXML", output) \end{verbatim} \section{Iterators} Several core-OpenMS objects have been adapted to allow iteration in a native way in Python. These objects are currently \texttt{ConsensusMap, FeatureMap, MSExperiment, MSSpectrum} and \texttt{MSChromatogram}. They thus allow the following syntax: \begin{verbatim} for spectrum in ms_experiment: for peak in spectrum: # process an individual peak pass \end{verbatim} \section{Appendix} In this appendix, a complete list of all wrapped functions is given, ordered by class. Note that not all C++ functions are wrapped in Python and the following documentation indicates which do have wrappers and can be used directly from Python. The following appendix also does not contain actual documentation of the functionality, please use the link to the OpenMS documentation to find up-to-date documentation on each class and member function. \include{appendix} \end{document}
{ "alphanum_fraction": 0.7878410624, "avg_line_length": 30.121875, "ext": "tex", "hexsha": "195076b1233cd16999cb031f8a3c19689b127f1f", "lang": "TeX", "max_forks_count": 266, "max_forks_repo_forks_event_max_datetime": "2022-03-30T12:32:35.000Z", "max_forks_repo_forks_event_min_datetime": "2015-01-24T14:56:14.000Z", "max_forks_repo_head_hexsha": "70ef98e32b02721f45fe72bd4de4b4833755a66f", "max_forks_repo_licenses": [ "BSL-1.0", "Zlib", "Apache-2.0" ], "max_forks_repo_name": "Amit0617/OpenMS", "max_forks_repo_path": "src/pyOpenMS/doc/Manual.tex", "max_issues_count": 4259, "max_issues_repo_head_hexsha": "70ef98e32b02721f45fe72bd4de4b4833755a66f", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:49:14.000Z", "max_issues_repo_issues_event_min_datetime": "2015-01-01T14:07:54.000Z", "max_issues_repo_licenses": [ "BSL-1.0", "Zlib", "Apache-2.0" ], "max_issues_repo_name": "Amit0617/OpenMS", "max_issues_repo_path": "src/pyOpenMS/doc/Manual.tex", "max_line_length": 95, "max_stars_count": 348, "max_stars_repo_head_hexsha": "70ef98e32b02721f45fe72bd4de4b4833755a66f", "max_stars_repo_licenses": [ "BSL-1.0", "Zlib", "Apache-2.0" ], "max_stars_repo_name": "Amit0617/OpenMS", "max_stars_repo_path": "src/pyOpenMS/doc/Manual.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-30T22:55:39.000Z", "max_stars_repo_stars_event_min_datetime": "2015-01-17T16:50:12.000Z", "num_tokens": 2454, "size": 9639 }
\subsubsection{\stid{6.03} Sandia ATDM Software Ecosystem and Delivery - Technology Demonstrator}

\paragraph{Overview}
This project is part of the NNSA/ASC program and is primarily focused on the integration of programming model technologies into existing applications. This project spans both the Code Development and Applications (CDA) and Architecture and Software Development (ASD) components of Sandia's ATDM program element, and this description only covers the ASD portion of the project. The purpose of this project is to broaden the impact of programming model technologies developed in ATDM by integrating these technologies into an existing ASC code suite, evaluating the ability of these technologies to be incrementally added into a legacy code base and to deliver performance improvements on current- and next-generation computing platforms. In particular, this subproject focuses on using and evaluating Kokkos and asynchronous many-task (AMT) programming models in the NGP Contact code. Kokkos is an abstraction layer that implements a parallel programming model in C++ for writing performance-portable applications targeting all major HPC platforms. NGP Contact is a performance-critical proximity search component relevant to many NNSA applications, including ASC production applications that invoke mechanical or thermal contact, coupled physics transfers, and particle algorithms. This project is important to the overall ECP efforts because it evaluates how well technologies developed for next-generation applications can be leveraged by existing large application code bases, such as the ASC Sierra toolkit, in an effort to maximize the impact of ECP technologies.

\paragraph{Key Challenges}
The main challenge associated with this project is the incremental integration of a new programming model and programming system into an existing very large production application. This challenge is similar in many ways to the incremental integration of OpenMP into an existing large MPI application. As Kokkos is a higher-level abstraction layer that can be implemented using OpenMP, there are additional challenges around the ability of Kokkos' abstractions and interfaces to integrate into an existing application and minimizing the performance penalty of the abstraction layer for different underlying hardware implementations.

\paragraph{Solution Strategy}
The strategy for this project is to produce demonstration applications that drive the development of an AMT scheduling toolset and to enable leveraging of Sierra-developed technologies to support ATDM application milestones.

\paragraph{Recent Progress}
The Technology Demonstrator team has continued progress toward a performant, optimized GPU contact implementation. Two final parts of the contact algorithm are being ported to run on the GPU: interaction determination and interaction enforcement. The Sierra Solid Mechanics and NimbleSM teams have begun, and are continuing, joint work on porting the existing NimbleSM CPU-only routines and expect to test GPU execution. Initial performance data indicated that providing a full implementation of GPU contact could provide significant performance improvements over CPU-only execution, and performance data will be gathered once the routines are ready.

\paragraph{Next Steps}
The team's next focus will be to add a frictional contact capability to NimbleSM using node-face contact.
This effort will include demonstrating correctness through a few canonical verification problems, optimization as needed, and evaluations of relative performance on hardware representative of LLNL's ATS-2 platform.
{ "alphanum_fraction": 0.8165950591, "avg_line_length": 51.0136986301, "ext": "tex", "hexsha": "76e07ca8a3e2fa18da143e63c2284d53e15b236a", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "9563f23b335c3cda19a239a1a8e9086bb682a2c3", "max_forks_repo_licenses": [ "BSD-2-Clause" ], "max_forks_repo_name": "Franckcappello/ECP-ST-CAR-PUBLIC", "max_forks_repo_path": "projects/2.3.6-NNSA/2.3.6.03-SNL-ATDM/2.3.6.03a-SNL-ATDM-Ecosystem.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "9563f23b335c3cda19a239a1a8e9086bb682a2c3", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-2-Clause" ], "max_issues_repo_name": "Franckcappello/ECP-ST-CAR-PUBLIC", "max_issues_repo_path": "projects/2.3.6-NNSA/2.3.6.03-SNL-ATDM/2.3.6.03a-SNL-ATDM-Ecosystem.tex", "max_line_length": 78, "max_stars_count": null, "max_stars_repo_head_hexsha": "9563f23b335c3cda19a239a1a8e9086bb682a2c3", "max_stars_repo_licenses": [ "BSD-2-Clause" ], "max_stars_repo_name": "Franckcappello/ECP-ST-CAR-PUBLIC", "max_stars_repo_path": "projects/2.3.6-NNSA/2.3.6.03-SNL-ATDM/2.3.6.03a-SNL-ATDM-Ecosystem.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 701, "size": 3724 }
\documentclass[12pt,oneside,a4paper]{article}
\input{.TEX/header}
\usepackage{blindtext}

\titleImage{.TEX/images/anon-crew.png}
\title{%
Snowden Leaks Example
}%
\subtitle{%
How to publish leaks with style
}%
\author{\small{\textit{Operation SaveMyEyes}}}
\date{\today}
\input{.TEX/titlefix}

\begin{document}
\input{.TEX/inDocumentOptions}

\begin{abstract}
I hated seeing publications of Anons, Whistleblowers, and internet users just written down as if they were typed on a Model 5150. Although it is nice for nostalgia, the addition of sources, images and comments directly in the text is not nice to read.\\
\indent Therefore, I decided to provide a small template which is simple to use and easy on the eyes. Feel free to use it for everything you think is suitable.
\end{abstract}

\blindmathtrue
\section{Test}
\Blindtext[5]
\section{Test}
\Blindtext[25]
\subsection{Test}
\blindtext[15]
\end{document}
{ "alphanum_fraction": 0.7211238293, "avg_line_length": 32.0333333333, "ext": "tex", "hexsha": "09dd70a2f88f3497c7acc7d7793e99d0c35d7534", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "207dca1069e5b1231be3948f1d6db0e60083339d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "MariusHerget/WhisteblowerLatexTemplate", "max_forks_repo_path": "template.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "207dca1069e5b1231be3948f1d6db0e60083339d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "MariusHerget/WhisteblowerLatexTemplate", "max_issues_repo_path": "template.tex", "max_line_length": 249, "max_stars_count": null, "max_stars_repo_head_hexsha": "207dca1069e5b1231be3948f1d6db0e60083339d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "MariusHerget/WhisteblowerLatexTemplate", "max_stars_repo_path": "template.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 269, "size": 961 }
\section{Language}
\label{sec:language}

Here we define the abstract syntax of the \langname language. It is a typed functional language with tuples, collections, optional types and \lst{val} binding expressions. The semantics of \langname is specified by first translating it to a core calculus (\corelang) and then by giving its evaluation semantics. Typing rules are given in Section~\ref{sec:typing} and evaluation semantics is given in Section~\ref{sec:evaluation}.

\langname is defined here using abstract syntax notation as shown in Figure~\ref{fig:language}. This corresponds to the \lst{ErgoTree} data structure, which can be serialized to an array of bytes. The mnemonics shown in the figure correspond to classes of the \lst{ErgoTree} reference implementation.

\begin{figure}[h]
\footnotesize
\input{figures/fig_language.tex}
\caption{Abstract syntax of ErgoScript language}
\label{fig:language}
\end{figure}

We assign types to the terms in a standard way following the typing rules shown in Figure~\ref{fig:typing}.

Constants keep both the type and the data value of that type. To be well-formed, the type of the constant should correspond to its value.

Variables are always typed and identified by a unique $id$, which refers to either a lambda-bound variable or a \lst{val}-bound variable. The encoding of variables and their resolution is described in Section~\ref{sec:blocks}.

Lambda expressions can take a list of lambda-bound variables which can be used in the body expression, which can be a \emph{block expression}.

Function application takes an expression of functional type (e.g. $T_1 \to T_n$) and a list of arguments. The reason we do not write it $e_f(\Ov{e})$ is that this notation suggests that $(\Ov{e})$ is a subterm, which it is not.

Method invocation allows applying functions defined as methods of \emph{interface types}. If an expression $e$ has interface type $I$ and a method $m$ is declared in the interface $I$, then the method invocation $e.m(args)$ is defined for the appropriate $args$.

Conditional expressions of \langname are strict in the condition and lazy in both of the branches. Each branch is an expression which is executed depending on the result of the condition. This laziness of branches is specified by lowering to \corelang (see Figure~\ref{fig:lowering}).

A block expression contains a list of \lst{val} definitions of variables. To be well-formed, each subsequent definition can only refer to the previously defined variables. The result of block execution is the result of the final expression $e$, which can use any variable of the block.

Each type may be associated with a list of method declarations, in which case we say that \emph{the type has methods}. The semantics of the methods is the same as in Java. Having an instance of some type with methods, it is possible to call methods on the instance with some additional arguments. Each method can be parameterized by type variables, which can be used in the method signature. Because \langname supports only monomorphic values, each method call is monomorphic and all type variables are assigned to concrete types (see the \lst{MethodCall} typing rule in Figure~\ref{fig:typing}).

The semantics of \langname is specified by translating all its terms to a somewhat lower and simplified language, which we call \corelang. This \emph{lowering} translation is shown in Figure~\ref{fig:lowering}.
\begin{figure}[h]
\begin{center}
\begin{tabular}{ l c l }
\hline
$Term_{\langname}$ & & $Term_{Core}$ \\
\hline
$\Low{ \TyLam{x_i}{T_i}{e} }$ & \To & $\Lam{x:(T_0,\dots,T_n)}{ \Low{ \{ \Ov{\lst{val}~x_i: T_i = x.\_i;}~e\} } }$ \\
$\Low{ \Apply{e_f}{\Ov{e_i}} }$ & \To & $\Apply{ \Low{e_f} }{ \Low{(\Ov{e_i})} }$ \\
$\Low{ \Apply{e.m}{\Ov{e_i}} }$ & \To & $\Apply{ \Low{e}.m}{\Ov{ \Low{e_i} }}$ \\
$\Low{ \Tup{e_1, \dots ,e_n} }$ & \To & $\Tup{\Low{e_1}, \dots ,\Low{e_n}}$ \\
$\Low{ e_1~\text{\lst{||}}~e_2 }$ & \To & $\Low{ \IfThenElse{ e_1 }{ \True }{ e_2 } }$ \\
$\Low{ e_1~\text{\lst{\&\&}}~e_2 }$ & \To & $\Low{ \IfThenElse{ e_1 }{ e_2 }{ \False } }$ \\
$\Low{ \IfThenElse{e_{cond}}{e_1}{e_2} }$ & \To & $\Apply{(\lst{if}(\Low{e_{cond}} ,~\Lam{(\_:Unit)}{\Low{e_1}} ,~\Lam{(\_:Unit)}{\Low{e_2}} ))}{}$ \\
$\Low{ \{ \Ov{\text{\lst{val}}~x_i: T_i = e_i;}~e\} }$ & \To & $\Apply{ (\Lam{(x_1:T_1)}{( \dots \Apply{(\Lam{(x_n:T_n)}{\Low{e}})}{\Low{e_n}} \dots )}) }{\Low{e_1}}$\\
$\Low{ \Apply{\delta}{\Ov{e_i}} }$ & \To & $\Apply{\delta}{\Ov{ \Low{e_i} }}$ \\
$\Low{ e }$ & \To & $e$ \\
\end{tabular}
\end{center}
\caption{Lowering to \corelang}
\label{fig:lowering}
\end{figure}

All $n$-ary lambdas with $n>1$ are transformed to single-argument lambdas using tupled arguments.

Note that the $\IfThenElse{e_{cond}}{e_1}{e_2}$ term of \langname has lazy evaluation of its branches, whereas the right-hand-side \lst{if} is a primitive operation and has strict evaluation of its arguments. The laziness is achieved by using lambda expressions of \lst{Unit} $\to$ \lst{Boolean} type.

We translate the logical operations (\lst{||}, \lst{&&}) of \langname, which are lazy in the second argument, to the \lst{if} term of \langname, which is recursively translated to the corresponding \corelang term.

Syntactic blocks of \langname are completely eliminated and translated to nested lambda expressions, which unambiguously specify the evaluation semantics of blocks. The \corelang is specified in Section~\ref{sec:evaluation}.
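As a worked illustration (this example is ours and is not part of the normative specification), the rules above can be applied to a small block expression containing one \lst{val} definition and a conditional. Assuming a variable $b$ of \lst{Boolean} type in scope and the type \lst{Int} for $x$, the block rule and then the conditional rule give:
\begin{center}
\begin{tabular}{ l c l }
$\Low{ \{ \lst{val}~x: \lst{Int} = 10;~\IfThenElse{ b }{ x }{ 0 } \} }$ & \To & $\Apply{ (\Lam{(x:\lst{Int})}{ \Low{ \IfThenElse{ b }{ x }{ 0 } } }) }{ \Low{10} }$ \\
$\Low{ \IfThenElse{ b }{ x }{ 0 } }$ & \To & $\Apply{(\lst{if}(\Low{b} ,~\Lam{(\_:Unit)}{\Low{x}} ,~\Lam{(\_:Unit)}{\Low{0}} ))}{}$ \\
\end{tabular}
\end{center}
so both branches become lambdas over \lst{Unit} and only the branch selected by the strict \lst{if} primitive is evaluated.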
{ "alphanum_fraction": 0.7165528695, "avg_line_length": 47.1217391304, "ext": "tex", "hexsha": "159db621e0ca9923b0c225606f0a8b1f5b52c8a6", "lang": "TeX", "max_forks_count": 19, "max_forks_repo_forks_event_max_datetime": "2022-01-30T02:12:08.000Z", "max_forks_repo_forks_event_min_datetime": "2017-12-28T11:19:17.000Z", "max_forks_repo_head_hexsha": "251784a9f7c1b325c4859fe256c9fe3862fffe4e", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "jozanek/sigmastate-interpreter", "max_forks_repo_path": "docs/spec/language.tex", "max_issues_count": 486, "max_issues_repo_head_hexsha": "251784a9f7c1b325c4859fe256c9fe3862fffe4e", "max_issues_repo_issues_event_max_datetime": "2022-03-30T11:02:28.000Z", "max_issues_repo_issues_event_min_datetime": "2017-12-08T13:07:23.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "jozanek/sigmastate-interpreter", "max_issues_repo_path": "docs/spec/language.tex", "max_line_length": 107, "max_stars_count": 41, "max_stars_repo_head_hexsha": "251784a9f7c1b325c4859fe256c9fe3862fffe4e", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "jozanek/sigmastate-interpreter", "max_stars_repo_path": "docs/spec/language.tex", "max_stars_repo_stars_event_max_datetime": "2022-02-23T19:27:50.000Z", "max_stars_repo_stars_event_min_datetime": "2017-04-21T13:18:44.000Z", "num_tokens": 1640, "size": 5419 }
\section{Introduction} The \commonlisp{} standard contains many references to environments. Most of these references concern \emph{lexical} environments at \emph{compile time}, because they are needed in order to process forms in non-null lexical environments. The standard does not specify the nature of these objects, though in CLtL2 \cite{Steele:1990:CLL:95411} there is a suggested protocol that is sometimes supplied in existing \commonlisp{} implementations. %% LocalWords: startup runtime
{ "alphanum_fraction": 0.8027888446, "avg_line_length": 41.8333333333, "ext": "tex", "hexsha": "fa3d563409dd849a92edb6c5d5df0abe53e80a65", "lang": "TeX", "max_forks_count": 4, "max_forks_repo_forks_event_max_datetime": "2021-01-19T20:32:33.000Z", "max_forks_repo_forks_event_min_datetime": "2019-04-27T18:18:55.000Z", "max_forks_repo_head_hexsha": "7a54150d94cc1e9f0c8175f7743995ea4894e115", "max_forks_repo_licenses": [ "BSD-2-Clause" ], "max_forks_repo_name": "thoughtron/SICL", "max_forks_repo_path": "Papers/Environment-info/sec-introduction.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "7a54150d94cc1e9f0c8175f7743995ea4894e115", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-2-Clause" ], "max_issues_repo_name": "thoughtron/SICL", "max_issues_repo_path": "Papers/Environment-info/sec-introduction.tex", "max_line_length": 70, "max_stars_count": 6, "max_stars_repo_head_hexsha": "7a54150d94cc1e9f0c8175f7743995ea4894e115", "max_stars_repo_licenses": [ "BSD-2-Clause" ], "max_stars_repo_name": "thoughtron/SICL", "max_stars_repo_path": "Papers/Environment-info/sec-introduction.tex", "max_stars_repo_stars_event_max_datetime": "2021-04-20T15:03:29.000Z", "max_stars_repo_stars_event_min_datetime": "2018-01-16T08:20:53.000Z", "num_tokens": 118, "size": 502 }
\chapter{Building The Plugin}
\label{chap:importing_the_grammar}

This part of the thesis will describe several phases of a~gradual implementation of the MPS plugin. We will show all the steps needed and keep this chapter in the form of an implementation diary, describing the way the author proceeded. It will allow us to slowly walk through all the obstacles the author has encountered. We believe that this will give readers a~better insight into the problem area than just describing the final solution. It might also help others who deal with similar problems to understand them more deeply, maybe choose a~different path, or perhaps just avoid some pitfalls we have discovered on our own. \\
The chapter first defines our custom language, on which we will be showing examples. Then we will talk about parsing grammar files. Lastly, we will discuss several approaches to generating the language's aspects (structure, editor, and TextGen).

\input{chapters/importing_the_grammar/the_simplexml_language}
\newpage
\input{chapters/importing_the_grammar/generating_code_inside_mps}
\newpage
\input{chapters/importing_the_grammar/parsing_the_grammar}
\newpage
\input{chapters/importing_the_grammar/the_structure_aspect}
\input{chapters/importing_the_grammar/the_editor_aspect}
\newpage
\input{chapters/importing_the_grammar/the_textgen_aspect}
{ "alphanum_fraction": 0.8097560976, "avg_line_length": 43.4848484848, "ext": "tex", "hexsha": "c94fa636b2e78c1932e2f07442e3fe98ddadda39", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "6726cb87a29f81cad2f2786197bd9128a4422c1b", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "premun/diploma-thesis", "max_forks_repo_path": "chapters/importing_the_grammar.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "6726cb87a29f81cad2f2786197bd9128a4422c1b", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "premun/diploma-thesis", "max_issues_repo_path": "chapters/importing_the_grammar.tex", "max_line_length": 217, "max_stars_count": 3, "max_stars_repo_head_hexsha": "6726cb87a29f81cad2f2786197bd9128a4422c1b", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "premun/diploma-thesis", "max_stars_repo_path": "chapters/importing_the_grammar.tex", "max_stars_repo_stars_event_max_datetime": "2018-12-18T13:37:21.000Z", "max_stars_repo_stars_event_min_datetime": "2018-12-17T21:20:29.000Z", "num_tokens": 305, "size": 1435 }
%!TEX root = lucene4IR2016workshop_report.tex \subsection*{Black Boxes are Harmful} {\bf Sauparna Palchowdhury (National Institute of Standards and Technology, Gaithersburg, Maryland, USA)}: Having seen students and practitioners in the IR community grapple with abstruse documentation accompanying search systems and their use as a black box, Sauparna, in his talk, argued why Lucene is a useful alternative and how and why we must ensure it does not become another black box. In establishing his views, he described the pitfalls in an IR experiment and the ways of mitigation. The suggestions he put forth, as a set of best practices, highlighted the importance of evaluation in IR to render an experiment reproducible and repeatable and the need for a well-documented system with correct implementations of search algorithms that are traceable to a source in IR literature. In the absence of such constraints on experimentation students are misled and learn little from the results of their experiments and it becomes hard to reproduce the experiments. As an example, the talk cited a wrong implementation of the \emph{Okapi BM25} term-weighting equation in a popular research retrieval system (Table \ref{tab:tfxidf}). Following this was a brief how-to on implementing \emph{BM25} (or any TF$\times$IDF weighting scheme) in Lucene (Table \ref{tab:lucene}). This also explained Lucene's way of computing the similarity between two text documents (usually referred to as \emph{Lucene's scoring formula}\footnote{\url{https://goo.gl/ZOMVYe}}). Some of the points of failure mentioned in the talk were misplaced test-collection pieces (document-query-qrel triplet), counterintuitive configuration interfaces of systems, poor documentation that make systems look enigmatic and lead to the creation of heuristics passed around by word-of-mouth, naming confusion (a myriad of TF$\times$IDF model names), blatant bugs and a obscure parser. As mitigation, Sauparna listed some of the things he did as an experimenter. He wrote a script (TRECBOX\footnote{\url{https://github.com/sauparna/TRECBOX}}) to abstract parts of the IR experiment pipeline and map them to configuration end-points of the three systems; Indri~\cite{Strohman05indri:a}, Terrier~\cite{Ounis:2005:TIR:2149960.2150009}, and Lucene~\cite{apache:lucene}. This would enable documenting and sharing an experiment's design in plain text files. He constructed a survey of term-weighting equations titled \emph{TF$\times$IDF Repository}\footnote{\url{http://kak.tx0.org/IR/TFxIDF}} meant to be a single point of reference to help disambiguate the variants in the wild. All equations mentioned in this repository are traceable to a source in IR literature. He also showed how to visually juxtapose evaluation results obtained using a permutation of a set of systems, retrieval models and test-collections on a chart that would act as a sanity check for the system's integrity. As a part of these investigations he modified Lucene for use with TREC collections (the mod was named LTR\footnote{\url{https://github.com/sauparna/LTR}}) which is available for others to use. The ``mod'' is also accompanied by notes to augment Lucene's documentation. The gamut of Sauparna's work is collected on a website\footnote{\url{http://kak.tx0.org/IR}}. Lucene's documentation does not use a well-defined notation to represent its way of computing the similarity score between a query $Q$ and document $D$. 
Equation \eqref{eqn:Lucene-scoring} denotes Lucene's scoring formula as described in Lucene's documentation. In the equation, $T$ denotes a term. The functions, in order from left to right, on the right-hand-side of the equation is the \emph{coordination factor}, \emph{query normalization factor}, \emph{term-frequency transformation}, \emph{document-frequency transformation}, \emph{query boost} and \emph{document-length normalization factor}. A well-defined, generalized, notation for Lucene's scoring, in step with the definition from Lucene's documentation, is Equation \eqref{eqn:Lucene-scoring-generalized} (function names were shortened appropriately). \begin{equation} score(Q,D) = coord(Q,D) \cdot qnorm(Q) \cdot \displaystyle\sum_{T \in Q} (tf(T \in D) \cdot idf(T)^2 \cdot boost(T) \cdot norm(T,D)) \label{eqn:Lucene-scoring} \end{equation} \begin{equation} score(Q,D) = f_{c}(Q,D) \cdot f_{q}(Q) \cdot \displaystyle\sum_{T \in Q \cap D}(tf(T) \cdot df(T) \cdot f_{b}(T) \cdot f_{n}(T,D))) \label{eqn:Lucene-scoring-generalized} \end{equation} To explain Lucene's scoring, Sauparna picked two popular TF$\times$IDF variants, broke them down into meaningful components (a term-frequency transformation, a document-frequency transformation and a length-normalization coefficient) and plugged these components into Lucene's equation. The components in Lucene's equation that were left unused were replaced by the integer $1$, meaning, the functions returned $1$; which would have no effect on the chain of multiplications. Table \ref{tab:tfxidf} lists the variants and components and Table \ref{tab:lucene} shows where the components were transplanted to. \begin{table} \centering \small \begin{minipage}[t]{0.65\textwidth} \begin{tabular}{lcc} \multicolumn{3}{c}{TF$\times$IDF Variants: What's correct and what's not.}\\ \\ Name & $w_{ik}$ & $w_{jk}$\\ \hline\hline \\ \emph{BM25}(A) & $\frac{f_{ik}}{k_{1}((1-b)+b\frac{dl_{i}}{avdl})+f_{ik}} \times \log(\frac{N-n_{k}+0.5}{n_{k}+0.5})$ & $\frac{(k_{3}+1)f_{jk}}{k_{3}+f_{jk}}$ \\ \\ \emph{BM25}(B) & $\frac{(k_{1}+1)f_{ik}}{k_{1}((1-b)+b\frac{dl_{i}}{avdl})+2f_{ik}} \times \log(\frac{N-n_{k}+0.5}{n_{k}+0.5})$ & $\frac{(k_{3}+1)f_{jk}}{k_{3}+f_{jk}}$ \\ \\\hline \\ \emph{Okapi BM25} & $\frac{(k_{1}+1)f_{ik}}{k_{1}((1-b)+b\frac{dl_{i}}{avdl})+f_{ik}} \times \log(\frac{N-n_{k}+0.5}{n_{k}+0.5})$ & $\frac{(k_{3}+1)f_{jk}}{k_{3}+f_{jk}}$ \\ \\ components & $TF \times DF$ & $QTF$ \\ \\\hline \\ \emph{SMART dtb.nnn} & $\frac{(1+\log(1+\log(f_{ik}))) \times \log(\frac{N+1}{n_{k}})}{1-s+s \cdot \frac{b_{i}}{avgb}}$ & $f_{jk}$ \\ \\ components & $TF \times DF \div LN$ & $QTF$ \\ \\\hline\hline \end{tabular} \caption{ \small The similarity score; $score(D_{i},D_{j})=\sum_{k=1}^{t}(w_{ik} \cdot w_{jk})$ $\forall i \neq j$, combines the weight of a term $k$ over the $t$ terms which occur in document $D_{i}$ and $D_{j}$. Since a query can also be thought of as a document in the same vector space, the symbol $D_{j}$ denotes a query. \emph{BM25}(A) and \emph{BM25}(B) are the two incorrect implementations found in a popular retrieval system. Comparing them to \emph{Okapi BM25} on the third row shows that A has the $k_{1}+1$ factor missing in the numerator, and B uses twice the term-frequency, $2f_{ik}$, in the denominator. Neither can they be traced to any source in IR literature, nor does the system's documentation say anything about them. 
The \emph{Okapi BM25} and the \emph{SMART dtb.nnn} variants are known to be effective formulations developed by trial and error over eight years of experimentation at TREC 1 through 8. Their forms have been abstracted using the abbreviations $TF$, $DF$, $LN$ and $QTF$ (term-frequency, document-frequency, length-normalization and query-term-frequency) to show how these components fit in Lucene's term-weight expression.} \label{tab:tfxidf} \end{minipage} \end{table} \begin{table}[bht!] \centering \small \begin{minipage}[t]{0.94\textwidth} \begin{tabular}{lccccccccccccc} \multicolumn{14}{c}{Implementing TF$\times$IDF variants in Lucene} \\ \hline\hline Lucene & $f_{c}(Q,D)$ & $\cdot$ & $f_{q}(Q)$ & $\cdot$ & $\displaystyle\sum_{T \in Q \cap D}($ & $tf(T)$ & $\cdot$ & $df(T)$ & $\cdot$ & $f_{b}(T)$ & $\cdot$ & $f_{n}(T,D)$ & $)$ \\ \emph{BM25} & $1$ & $\cdot$ & $1$ & $\cdot$ & $\displaystyle\sum_{T \in Q \cap D}($ & $TF$ & $\cdot$ & $DF$ & $\cdot$ & $QTF$ & $\cdot$ & $1$ & $)$ \\ \emph{dtb.nnn} & $1$ & $\cdot$ & $1$ & $\cdot$ & $\displaystyle\sum_{T \in Q \cap D}($ & $TF$ & $\cdot$ & $DF$ & $\cdot$ & $QTF$ & $\cdot$ & $LN$ & $)$ \\ \hline\hline \end{tabular} \caption{\small Plugging components of the TF$\times$IDF equation into Lucene's scoring equation; the first row is the generalized form and the following two rows show the components of two popular TF$\times$IDF equations from Table \ref{tab:tfxidf} transplanted to Lucene's equation.} \label{tab:lucene} \end{minipage} \end{table} Making a reference to the SIGIR 2012 tutorial on \emph{Experimental Methods for Information Retrieval}~\cite{Metzler:2012:EMI:2348283.2348534}, Sauparna stated that we need to take a more rigorous approach to the IR experimental methodology. A list of best practices were recommended that would add more structure to IR experiments and prevent the use of systems as black boxes. These were: \begin{enumerate} \item Record test-collection statistics. \item Provide design documentation for systems. \item Use a consistent naming scheme and a well-defined notation. \item Use a evaluation table as a sanity check. \item Isolate shareable experimental artifacts. \item Ensure that implementations are traceable to a source in IR literature. \end{enumerate} In conclusion, Sauparna suggested that if we, the IR research community, were to build and work with Lucene, it would be helpful to consider these points when introducing new features into Lucene.
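As an illustrative aside to Tables~\ref{tab:tfxidf} and \ref{tab:lucene} (this sketch is ours and was not part of the talk), the \emph{Okapi BM25} per-term weight can be computed directly from its $TF$, $DF$ and $QTF$ components in a few lines of Python; all statistics and parameter values below are made-up examples:

\begin{verbatim}
import math

# Hypothetical statistics for one query term in one document.
N, n_k = 100000, 1200       # collection size, document frequency of the term
f_ik, f_jk = 3, 1           # term frequency in the document and in the query
dl, avdl = 250.0, 300.0     # document length and average document length
k1, k3, b = 1.2, 8.0, 0.75  # BM25 parameters

# Components as in the Okapi BM25 row of the TFxIDF table.
TF  = ((k1 + 1) * f_ik) / (k1 * ((1 - b) + b * dl / avdl) + f_ik)
DF  = math.log((N - n_k + 0.5) / (n_k + 0.5))
QTF = ((k3 + 1) * f_jk) / (k3 + f_jk)

# Per-term contribution; score(Q, D) sums this over all matching query terms.
print(TF * DF * QTF)
\end{verbatim}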
{ "alphanum_fraction": 0.6985457483, "avg_line_length": 46.4882629108, "ext": "tex", "hexsha": "2e3a1f341fac0ccb009dd56ad983a5dc0249a4d2", "lang": "TeX", "max_forks_count": 16, "max_forks_repo_forks_event_max_datetime": "2017-08-02T15:09:47.000Z", "max_forks_repo_forks_event_min_datetime": "2016-09-07T17:58:37.000Z", "max_forks_repo_head_hexsha": "487187768d57d4e0ae6751de98614c4161e7b4ad", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "amitkumarj441/lucene4ir", "max_forks_repo_path": "sigirforumreport/rup.tex", "max_issues_count": 5, "max_issues_repo_head_hexsha": "487187768d57d4e0ae6751de98614c4161e7b4ad", "max_issues_repo_issues_event_max_datetime": "2020-08-11T01:46:54.000Z", "max_issues_repo_issues_event_min_datetime": "2017-08-28T07:30:22.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "amitkumarj441/lucene4ir", "max_issues_repo_path": "sigirforumreport/rup.tex", "max_line_length": 134, "max_stars_count": 39, "max_stars_repo_head_hexsha": "487187768d57d4e0ae6751de98614c4161e7b4ad", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "amitkumarj441/lucene4ir", "max_stars_repo_path": "sigirforumreport/rup.tex", "max_stars_repo_stars_event_max_datetime": "2020-02-09T02:05:27.000Z", "max_stars_repo_stars_event_min_datetime": "2017-08-11T00:08:15.000Z", "num_tokens": 2927, "size": 9902 }
\chapter{Graphs}

A \emph{graph} is an ordered pair $G=(V,E)$ where $V$ is a set of \emph{vertices} and $E$ is a set of \emph{edges}. An edge is a pair of vertices which are said to be \emph{adjacent}. An edge is said to be \emph{incident} on its component vertices. Usually we consider edges to be unordered, in which case the graph is undirected and an edge $\{A,B\}$ connects $A$ to $B$ and $B$ to $A$. For example:

\begin{align*}
V &= \{ A, B, C \} \\
E &= \{ (A, B), (B, C) \}
\end{align*}

Which can be represented more visually:

{
\includegraphics[scale=0.2]{SimpleGraph}
%\caption{Demonstrates a simple graph}
\label{fig:SimpleGraph}
}

Note that the vertices are represented by labeled circles and the edges are represented by lines connecting vertices to one another.

The number of edges connecting a vertex $v$ is called the degree of $v$ or $deg(v)$. In the above example:

\begin{align*}
deg(A) = deg(C) &= 1 \\
deg(B) &= 2
\end{align*}

Sometimes an edge is directional, meaning the pair of vertices in an edge is ordered. In other words, the edge $(A, B)$ connects $A$ to $B$, but not $B$ to $A$. A graph whose edges are ordered pairs is called a \emph{directed graph} or \emph{digraph}. This is represented visually by an edge with an arrow at one end, indicating the direction:

{
\includegraphics[scale=0.2]{DiGraph}
%\caption{Demonstrates a directed graph}
\label{fig:DiGraph}
}

In a digraph, the number of incoming edges of $v$ is the in-degree or $deg^-(v)$. Similarly, the number of outgoing edges is the out-degree or $deg^+(v)$. In the above example:
%
\begin{align*}
deg^+(A) = deg^+(B) &= 1 \\
deg^-(B) = deg^-(C) &= 1 \\
deg^+(C) = deg^-(A) &= 0
\end{align*}

A \emph{simple graph} is a graph which contains no edges from any vertex $v$ to itself $ (v,v) $, called loops. A simple graph also contains no \emph{multi-edges}, that is, multiple edges connecting the same pair of vertices.

A \emph{path} is a sequence of edges from vertex $u$ to vertex $v$. Two vertices are said to be \emph{connected} if there exists a path between them. A graph is said to be connected if for any two vertices, there exists a path between them. An adjacent vertex is called a \emph{neighbour}. A \emph{complete} graph is one in which every vertex is adjacent to every other vertex.

A \emph{subgraph} is a graph consisting of a subset of the vertices and edges in another graph. A \emph{connected component} is a maximal connected subgraph, that is, one which cannot be extended with any further vertices or edges of the graph while remaining connected. It should be easy to see that a connected graph has exactly one connected component.

A \emph{cycle} in a graph is a path from a vertex back to itself which does not cross any edge more than once. A graph is said to be \emph{acyclic} when it does not contain cycles.

\section{Representation}

\subsection{Adjacency List}

One way to represent a graph is to store, for every vertex, a list of its neighbours.
For example: { \includegraphics[scale=0.2]{SimpleGraph} %\caption{Demonstrates a simple graph} \label{fig:SimpleGraph} } can be represented: { \includegraphics[scale=0.7]{AdjacencyList} %\caption{Demonstrates an adjacency list for a simple graph} \label{fig:AdjacencyList} } And { \includegraphics[scale=0.2]{DiGraph} %\caption{Demonstrates a directed graph} \label{fig:DiGraph} } can be represented: { \includegraphics[scale=1.0]{AdjacencyListDigraph} %\caption{Demonstrates an adjacency list for a directed graph} \label{fig:AdjacencyListDigraph} } \subsection{Adjacency Matrix} Another representation of a graph is as a square matrix with $n$ rows and columns where the element at row $i$ and column $j$ is 1 if there is an edge between $v_i$ and $v_j$ and 0 otherwise. For example: { \includegraphics[scale=0.2]{SimpleGraph} %\caption{Demonstrates a simple graph} \label{fig:SimpleGraph} } can be represented \[ \left[ \begin{array}{cccc} & a & b & c \\ a & 0 & 1 & 0 \\ b & 1 & 0 & 1 \\ c & 0 & 1 & 0 \end{array} \right] \] As for directed graphs, row $i$ column $j$ is 1 if there exists an edge $(v_i,v_j)$. For example: { \includegraphics[scale=0.2]{DiGraph} %\caption{Demonstrates a directed graph} \label{fig:DiGraph} } can be represented \[ \left[ \begin{array}{cccc} & a & b & c \\ a & 0 & 1 & 0 \\ b & 0 & 0 & 1 \\ c & 0 & 0 & 0 \end{array} \right] \]
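The two representations can also be written out in code. The following short Python sketch (our illustration, not part of the original chapter) builds both the adjacency list and the adjacency matrix for the undirected example graph above:

\begin{verbatim}
# Undirected example graph: vertices A, B, C and edges {A,B}, {B,C}.
vertices = ["A", "B", "C"]
edges = [("A", "B"), ("B", "C")]

# Adjacency list: map each vertex to a list of its neighbours.
adj_list = {v: [] for v in vertices}
for u, v in edges:
    adj_list[u].append(v)
    adj_list[v].append(u)          # undirected: record both directions

# Adjacency matrix: entry [i][j] is 1 iff v_i and v_j are adjacent.
index = {v: i for i, v in enumerate(vertices)}
adj_matrix = [[0] * len(vertices) for _ in vertices]
for u, v in edges:
    adj_matrix[index[u]][index[v]] = 1
    adj_matrix[index[v]][index[u]] = 1

print(adj_list)    # {'A': ['B'], 'B': ['A', 'C'], 'C': ['B']}
print(adj_matrix)  # [[0, 1, 0], [1, 0, 1], [0, 1, 0]]
\end{verbatim}

For the directed example, only the single direction of each edge would be recorded.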
{ "alphanum_fraction": 0.7012542759, "avg_line_length": 25.7941176471, "ext": "tex", "hexsha": "fa96b3ecc4ce6e68e3d954332cfa8f82f5e8ba59", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "13a04cc4a6bd8dec5e35c1a42b96680d47a98962", "max_forks_repo_licenses": [ "BSD-2-Clause-FreeBSD" ], "max_forks_repo_name": "SteamedPears/AllTheAlgorithms", "max_forks_repo_path": "graphs.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "13a04cc4a6bd8dec5e35c1a42b96680d47a98962", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-2-Clause-FreeBSD" ], "max_issues_repo_name": "SteamedPears/AllTheAlgorithms", "max_issues_repo_path": "graphs.tex", "max_line_length": 70, "max_stars_count": 3, "max_stars_repo_head_hexsha": "13a04cc4a6bd8dec5e35c1a42b96680d47a98962", "max_stars_repo_licenses": [ "BSD-2-Clause-FreeBSD" ], "max_stars_repo_name": "SteamedPears/AllTheAlgorithms", "max_stars_repo_path": "graphs.tex", "max_stars_repo_stars_event_max_datetime": "2017-05-01T03:13:05.000Z", "max_stars_repo_stars_event_min_datetime": "2016-10-12T19:16:53.000Z", "num_tokens": 1354, "size": 4385 }
\section{\protect\uppercase{Implementation}} \subsection{Computational framework} Peridigm is used in the context of the present study \cite{PeridigmUserGuide100}. It is an open-source computational state-based PD code developed at Sandia National Laboratories for massively-parallel multi-physics simulations. Peridigm uses a FE mesh as basis for its discretizations. Hexahedron and tetrahedron elements are transformed into peridynamic collocation points and associated with the respective element volume. Different material properties can be assigned by dividing the model into multiple blocks. \subsection{Stochastic model} To reduce possible dependencies of the solution from the underlying discretization scheme, a stochastic distribution of elastic material properties is proposed to incorporate the statistical nature of damage initiation (\autoref{fig:Thry:Stochastic:Implementation}). Additionally, this gives a possibility to check whether a failure pattern is driven by the chosen discretization or an actual phenomenon. \cite{SillingSA2007b} published a similar idea for capturing damage evolution by introducing fluctuations in the critical stretch by means of a Weibull or other distribution. The stochastic distribution of the elastic constants is also motivated by scatter in stress-strain curves and locations of failure of different test specimen and findings in micrographs in the bulk resin specimen (\autoref{fig:Exp:Tension}). These deviations may be caused by micro-voids, locally varying degree of cure in the epoxy material or slight disparities of the specimen geometries caused by the machining process. Introduction of a stochastic material distribution has the goal to filter and numerical effects in the simulation and to ensure, that the dominating effect causing the physical failure is adequately described in the numerical model. The calculations have to be performed multiple times with different stochastic distributions to assure the dominating effect is adequately triggered. \begin{figure}[htbp] \setlength{\figheight}{5cm} \begin{subfigure}{0.49\linewidth} \begin{minipage}[b][\figheight]{\linewidth} \centering % Variables \def\nb{20} \def\ne{10} % Picture \input{../../Material/Figures/Fig_Implementation_Stochastic_Constant} \end{minipage} \caption{Simple stochastic model} \label{fig:Thry:Stochastic:Distribution} \end{subfigure} \hfill \begin{subfigure}{0.49\linewidth} \begin{minipage}[b][\figheight]{\linewidth} %\includegraphics[width=\linewidth,height=\figheight,keepaspectratio]{../../Material/Figures/Model_FE_Hex_0-5_Stochastic_ct.png} \includegraphics[width=\linewidth,height=\figheight,keepaspectratio]{Model_FE_Hex_0-5_Stochastic_ct} \end{minipage} \caption{Base FE mesh with stochastic block distribution} \label{fig:Thry:Stochastic:FEModel} \end{subfigure} \caption{Implementation of stochastic material distribution for PD simulations} \label{fig:Thry:Stochastic:Implementation} \end{figure} When Peridigm computes the internal force, it computes a force state at each node in the model and applies that force state to each bond that is attached to the node. For each bond, the resulting force density is applied to the node itself, and negative one times the force density is applied to the node on the other end of the bond. This is consistent with the state-based formulation in \autoref{eq:PeridynamicLimits}. The way Peridigm handles material interfaces is basically a direct application of \autoref{eq:PeridynamicLimits}. 
The result at a material interface is an average of the two material models. Thus, a block-based stochastic model is possible by simply assigning materials with different elastic constants. As the nature of the distribution of stochastic effects in the real specimen is currently unknown, a rather simple approach is chosen for their modeling. During the creation of the specimen, elements in the damage-prone area are stochastically associated to multiple block definitions. Each block is associated with a material that has a defined deviation from the nominal elastic constants. The number of different block definitions and the maximum deviation from the nominal elastic constants can be chosen randomly. More complex distribution, such as Gaussian or Weibull distribution, may be implemented in the future if the approach seems promising. % % Weibull % \begin{tikzpicture} % \def\a{10} % \def\b{1} % \begin{axis}[ % smooth % ] % \addplot[domain=0:2,samples=100] {(\a/\b)*(x/\b)^(\a-1)*e^(-(x/\b)^\a)}; % \end{axis} % \end{tikzpicture} % \href{http://imechanica.org/files/Silling_Peridynamic_McMat07.pdf}{http://imechanica.org/files/Silling\_Peridynamic\_McMat07.pdf}, Slide 20: % % \cite{SillingSA2007b} % % \begin{itemize} % \item We can introduce fluctuations in critical stretch as a function of position and bond orientation according to Weibull or other distribution. % \item This is one way of incorporating the statistical nature of damage evolution. % \end{itemize}
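A minimal sketch of the block-based randomization described above is given below. It is purely illustrative and does not use the Peridigm input format or API; the function name, the element ids, the nominal Young's modulus and the deviation bound are all assumptions chosen for the example:

\begin{verbatim}
import random

def assign_stochastic_blocks(element_ids, n_blocks=20,
                             E_nominal=3.0e9, max_dev=0.05, seed=None):
    # Randomly associate elements with blocks whose elastic modulus
    # deviates from the nominal value by at most +/- max_dev (relative).
    rng = random.Random(seed)
    # One Young's modulus per block, drawn uniformly around the nominal value.
    E_of_block = [E_nominal * (1.0 + rng.uniform(-max_dev, max_dev))
                  for _ in range(n_blocks)]
    # Each element in the damage-prone area is assigned to a random block.
    block_of_element = {eid: rng.randrange(n_blocks) for eid in element_ids}
    return block_of_element, E_of_block

# Different seeds give different realizations, so the calculation can be
# repeated with several stochastic distributions as required.
blocks, moduli = assign_stochastic_blocks(range(1000), n_blocks=10,
                                          max_dev=0.03, seed=42)
\end{verbatim}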
{ "alphanum_fraction": 0.7905060808, "avg_line_length": 80.9206349206, "ext": "tex", "hexsha": "3bb528f92fcaeeeeac8a1dcf3dba041c6fa6b8fe", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "f31bccc7b8ea60cd814d00732aebdbbe876a2ac7", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "oldninja/PeriDoX", "max_forks_repo_path": "Publications/2017_ECCOMAS/Texts/Paper/Sections/Implementation.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "f31bccc7b8ea60cd814d00732aebdbbe876a2ac7", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "oldninja/PeriDoX", "max_issues_repo_path": "Publications/2017_ECCOMAS/Texts/Paper/Sections/Implementation.tex", "max_line_length": 1386, "max_stars_count": null, "max_stars_repo_head_hexsha": "f31bccc7b8ea60cd814d00732aebdbbe876a2ac7", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "oldninja/PeriDoX", "max_stars_repo_path": "Publications/2017_ECCOMAS/Texts/Paper/Sections/Implementation.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1160, "size": 5098 }
\documentclass{article}
\usepackage[utf8]{inputenc}
\usepackage{amsmath,amssymb}
\usepackage[pdfborder={0 0 0}]{hyperref}

\newcommand\set[1]{\left\{#1\right\}}

\begin{document}

\section{Exercise 9}

\subsection{Straight}
This is five cards in a sequence (e.g., 4,5,6,7,8), with aces allowed to be either 1 or 13 (low or high) and with the cards allowed to be of the same suit (e.g., all hearts) or from some different suits. The number of such hands is $10 \cdot {4 \choose 1}^5$. The probability is $0.003940$. (Note that this count still includes the 36 straight flushes and 4 royal flushes; the whiteboard solution below subtracts them.)

\subsection{3 of a kind}
This hand has the pattern AAABC where A, B, and C are from distinct kinds. The number of such hands is ${13 \choose 1}{4 \choose 3}{12 \choose 2}{4 \choose 1}^2 = 54 912$. After dividing by ${52 \choose 5}$, the probability is $0.021128$.

\subsection{2 pair}
This hand has the pattern AABBC where A, B, and C are from distinct kinds. The number of such hands is ${13 \choose 2}{4 \choose 2}{4 \choose 2}{11 \choose 1}{4 \choose 1}$. After dividing by ${52 \choose 5}$, the probability is $0.047539$.

\subsection{Pair}
This is the hand with the pattern AABCD, where A, B, C and D are from the distinct ``kinds'' of cards: aces, twos, threes, \dots, tens, jacks, queens, and kings (there are 13 kinds, and four of each kind, in the standard 52 card deck). The number of such hands is ${13 \choose 1}\cdot{4 \choose 2}\cdot{12 \choose 3}\cdot {4 \choose 1}^3$. If all hands are equally likely, the probability of a single pair is obtained by dividing by ${52 \choose 5}$. This probability is $0.422569$.

\subsection{High card}
We have to choose 5 distinct kinds ${13 \choose 5}$ but exclude any straights (subtract 10). We can have any pattern of suits except the 4 patterns where all 5 cards have the same suit: $4^5-4$. The total number of such hands is $[{13 \choose 5}-10] \cdot (4^5-4)$. The probability is $0.501177$.

\section{Whiteboard solution}

\[ \Omega = \set{\omega \subseteq \set{1, \dots, 52}\,|\, |\omega| = 5} \]

\begin{description}
\item[Straight]
\[
  \mathbb P(\text{straight})
    = \frac{10 \cdot 4^5}{{52 \choose 5}} - \frac{36}{{52 \choose 5}} - \frac{4}{{52 \choose 5}}
    = \frac{10200}{{52 \choose 5}} = 0.003925
\]
\item[3 with same rank] $13$ different ranks.
\[ {4 \choose 3} \text{ for color combinations} \]
\[ {12 \choose 2} \text{ possibilities for remainder} \]
\[ 4^2 \text{ color combinations} \]
\[
  \mathbb P(\text{3 of a kind}) = \frac{13 {4 \choose 3} {12 \choose 2} 4^2}{{52 \choose 5}}
    = \frac{54912}{{52 \choose 5}}
\]
\item[2-pair] $2\times 2$ cards of same rank
\[
  \mathbb P(\text{2-pair}) = \frac{{13 \choose 2} {4 \choose 2} {4 \choose 2} {11 \choose 1} \cdot 4}{{52 \choose 5}}
\]
\item[pair] 13 for the rank, ${4 \choose 2}$ for colors ensures that we avoid duplicates
\[
  \mathbb P(\text{pair}) = \frac{13 {4 \choose 2} {12 \choose 3} 4^3}{|\Omega|}
\]
\item[high card] We can simply compute $1 - \text{(what we had so far)}$, but we will be exhaustive here:

Possibilities for $5$ cards: ${13 \choose 5}$ \\
-10 straights: $[{13 \choose 5} - 10]$ \\
$4^5$ color combinations \\
$-4$: $[4^5 - 4]$
\[
  \mathbb P(\text{high card}) = \frac{[{13 \choose 5} - 10] [4^5 - 4]}{{52 \choose 5}}
\]
\end{description}

\end{document}
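The counts above are easy to double-check numerically. The following short Python script (our addition, not part of the exercise sheet) reproduces all of the quoted probabilities with exact integer binomial coefficients:

\begin{verbatim}
from math import comb

deck = comb(52, 5)  # 2 598 960 five-card hands

hands = {
    "straight (incl. straight/royal flushes)": 10 * 4**5,          # 0.003940
    "straight (whiteboard, flushes removed)": 10 * 4**5 - 36 - 4,  # 0.003925
    "three of a kind": 13 * comb(4, 3) * comb(12, 2) * 4**2,       # 0.021128
    "two pair": comb(13, 2) * comb(4, 2)**2 * 11 * 4,              # 0.047539
    "pair": 13 * comb(4, 2) * comb(12, 3) * 4**3,                  # 0.422569
    "high card": (comb(13, 5) - 10) * (4**5 - 4),                  # 0.501177
}

for name, count in hands.items():
    print(f"{name}: {count / deck:.6f}")
\end{verbatim}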
{ "alphanum_fraction": 0.6559238797, "avg_line_length": 56.1724137931, "ext": "tex", "hexsha": "e0744e9786325ea196e4f771366c7a4f46485cd1", "lang": "TeX", "max_forks_count": 6, "max_forks_repo_forks_event_max_datetime": "2020-10-25T11:00:11.000Z", "max_forks_repo_forks_event_min_datetime": "2019-03-24T14:42:30.000Z", "max_forks_repo_head_hexsha": "d1a94e128d13ce4399a9cc55323b2f8e0d9494fd", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "prokls/math-lecture-notes", "max_forks_repo_path": "probability_theory_practicals/ex9/solution.tex", "max_issues_count": 2, "max_issues_repo_head_hexsha": "d1a94e128d13ce4399a9cc55323b2f8e0d9494fd", "max_issues_repo_issues_event_max_datetime": "2019-07-02T09:32:40.000Z", "max_issues_repo_issues_event_min_datetime": "2019-05-22T07:56:03.000Z", "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "prokls/math-lecture-notes", "max_issues_repo_path": "probability_theory_practicals/ex9/solution.tex", "max_line_length": 469, "max_stars_count": 7, "max_stars_repo_head_hexsha": "d1a94e128d13ce4399a9cc55323b2f8e0d9494fd", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "prokls/math-lecture-notes", "max_stars_repo_path": "probability_theory_practicals/ex9/solution.tex", "max_stars_repo_stars_event_max_datetime": "2022-02-26T14:47:36.000Z", "max_stars_repo_stars_event_min_datetime": "2015-11-25T01:49:55.000Z", "num_tokens": 1148, "size": 3258 }
\documentclass[en]{oucart} \usepackage{amsfonts} \usepackage{amsmath} \usepackage{amsthm} \usepackage{amssymb} \usepackage{mathrsfs} \usepackage[numbers]{natbib} \usepackage[fit]{truncate} \newcommand{\truncateit}[1]{\truncate{0.8\textwidth}{#1}} \theoremstyle{plain} \newtheorem{theorem}{Theorem}[section] \newtheorem{corollary}[theorem]{Corollary} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{claim}[theorem]{Claim} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{question}{Question} \newtheorem{conjecture}[theorem]{Conjecture} \theoremstyle{definition} \newtheorem{definition}[theorem]{Definition} \newtheorem{example}[theorem]{Example} \newtheorem{notation}[theorem]{Notation} \newtheorem{exercise}[theorem]{Exercise} \title{超整数的代数算数点和韦氏三角形的一个例子} \entitle{Algebraically Arithmetic Points of Super-Integral, Uncountable, Weyl Triangles and an Example of Archimedes} \author{A. Lastname} \studentid{MathGen\_001} \advisor{MathGen} \department{数学科学学院}{2011 级信息与计算科学} \cnabstractkeywords{ 令 $\tilde{g} \ge 1$ 为任意的. 近年来学者的兴趣集中在构造黎曼和莫比乌斯域上. 我们证明 $\mathbf{{l}}'' \to E''$. 我们希望拓展 \cite{cite:0} 中的结果到函数上. 在 \cite{cite:0} 中, 作者描述了独特的三角形. }{ 超整数, 代数算数点, 韦氏三角形, 阿基米德 } \enabstractkeywords{ Let $\tilde{g} \ge 1$ be arbitrary. Recent interest in elements has centered on constructing Riemannian, M\"obius domains. We show that $\mathbf{{l}}'' \to E''$. We wish to extend the results of \cite{cite:0} to functions. In \cite{cite:0}, the authors characterized unique triangles. }{ Algebraically Arithmetic Points, Super-Integral, Uncountable, Weyl Triangles, Archimedes } \begin{document} \makecover \makeabstract \tableofcontents \newpage \section{Introduction} In \cite{cite:0}, it is shown that \begin{align*} \Lambda'' \left( \Omega^{4}, \sqrt{2} \pi \right) & > 0^{9} \\ & \ne \left\{ 2 \colon \overline{G 0} \supset \sinh \left( \frac{1}{\sqrt{2}} \right) \times \mathscr{{M}} \left( | J |^{-6}, \dots, \sqrt{2}^{-2} \right) \right\} .\end{align*} In this setting, the ability to describe sets is essential. It is not yet known whether there exists a Kolmogorov linear, Volterra triangle, although \cite{cite:1} does address the issue of surjectivity. Here, uniqueness is obviously a concern. M. Sasaki's computation of totally $p$-adic equations was a milestone in hyperbolic knot theory. The work in \cite{cite:2} did not consider the analytically co-abelian case. Recent interest in hulls has centered on classifying nonnegative, Einstein homomorphisms. It was Pascal who first asked whether meager systems can be described. In \cite{cite:3}, it is shown that every commutative, infinite random variable is commutative and semi-admissible. Every student is aware that $$2 \ne \int_{I} \phi \left( \mathcal{{W}}, N^{-4} \right) \,d \mathscr{{O}}.$$ The work in \cite{cite:1} did not consider the Banach, almost parabolic, contravariant case. P. D'Alembert \cite{cite:3} improved upon the results of P. Johnson by studying pairwise independent, Huygens, infinite lines. In this setting, the ability to extend Galileo, anti-pairwise Weierstrass fields is essential. This could shed important light on a conjecture of Bernoulli. It was Cantor who first asked whether universal hulls can be constructed. This reduces the results of \cite{cite:2,cite:4} to a standard argument. This reduces the results of \cite{cite:5} to results of \cite{cite:6}. The goal of the present paper is to compute algebraically super-extrinsic functors. The goal of the present article is to characterize universally $\mathbf{{b}}$-Chebyshev lines. 
Is it possible to construct homeomorphisms? Every student is aware that there exists an ordered extrinsic point. Unfortunately, we cannot assume that there exists a freely $C$-injective, open, meromorphic and stochastic measure space. Recent developments in numerical arithmetic \cite{cite:0} have raised the question of whether $${e_{\mathscr{{M}},\mathcal{{Q}}}}^{-1} \left( 1 \right) \le \int_{\sqrt{2}}^{1} \Theta \left( V \times | {\mathscr{{Y}}^{(i)}} |, {\mathscr{{Q}}_{f}} \right) \,d \mathscr{{Y}}.$$ Moreover, it is essential to consider that $\tilde{i}$ may be partially non-associative. \section{Main Result} \begin{definition} Let us assume we are given a pairwise compact, left-null, semi-Noetherian line $E$. A topos is a \textbf{morphism} if it is real. \end{definition} \begin{definition} A subgroup ${\mathscr{{X}}_{\chi,\eta}}$ is \textbf{hyperbolic} if $\mathscr{{M}}''$ is homeomorphic to $z$. \end{definition} In \cite{cite:4}, it is shown that there exists a minimal, algebraically anti-elliptic and universally right-affine nonnegative definite, non-Klein hull. Recently, there has been much interest in the description of embedded, dependent factors. In contrast, in this setting, the ability to derive pseudo-linear manifolds is essential. \begin{definition} Assume we are given a Pythagoras--Legendre, continuously onto, negative line $\eta$. An almost normal, quasi-commutative, left-locally covariant functional is a \textbf{prime} if it is naturally hyper-real and prime. \end{definition} We now state our main result. \begin{theorem} $\Psi = \bar{\eta}$. \end{theorem} The goal of the present article is to extend right-reducible, countable, degenerate paths. This leaves open the question of naturality. A central problem in convex set theory is the classification of sub-unconditionally natural, Riemannian, everywhere $\psi$-Galileo scalars. \section{Applications to Real Analysis} We wish to extend the results of \cite{cite:7} to finitely $\mathscr{{P}}$-tangential elements. In future work, we plan to address questions of maximality as well as separability. In \cite{cite:8}, the authors characterized anti-Eisenstein, admissible sets. Here, finiteness is clearly a concern. It is essential to consider that $N''$ may be linear. This reduces the results of \cite{cite:5,cite:9} to an easy exercise. It would be interesting to apply the techniques of \cite{cite:10} to generic hulls. In \cite{cite:4}, the authors address the admissibility of continuously non-Riemannian isometries under the additional assumption that every super-Bernoulli algebra is dependent and elliptic. Unfortunately, we cannot assume that \begin{align*} \overline{\Sigma} & = \left\{ {O_{\iota,a}} \pm \pi \colon \Phi' \left( \frac{1}{e}, \frac{1}{\Psi} \right) \le \varinjlim \overline{-\emptyset} \right\} \\ & \cong \left\{-i \colon {\Lambda_{T,\sigma}} \left( \tilde{\mathfrak{{m}}}^{9}, \dots, 1 \times {\mathcal{{C}}^{(s)}} \right) < \int_{0}^{\sqrt{2}} \Gamma \left( \gamma^{5} \right) \,d \bar{r} \right\} \\ & = \left\{-1 \colon \overline{{G^{(\mathcal{{D}})}}^{6}} \ne \varinjlim \mathscr{{Q}} \left( \frac{1}{0}, \dots, J ( \psi'' ) \right) \right\} .\end{align*} It is essential to consider that $\tilde{\iota}$ may be left-everywhere Borel. Let us suppose $\rho > 1$. \begin{definition} Let $I < \pi$ be arbitrary. We say a closed scalar $\hat{\ell}$ is \textbf{bijective} if it is surjective, contra-standard, compactly nonnegative and everywhere solvable. 
\end{definition} \begin{definition} Let $\Theta$ be a standard functional. An anti-commutative topos is a \textbf{morphism} if it is local, bounded, free and standard. \end{definition} \begin{lemma} Let $| {v_{O,\mathscr{{Y}}}} | > \aleph_0$. Let $\tilde{J}$ be an equation. Further, let us suppose $\delta < 0$. Then $| L'' | \le | {\pi_{z,B}} |$. \end{lemma} \begin{proof} See \cite{cite:5}. \end{proof} \begin{proposition} Let $| \mathcal{{G}} | \ne e$ be arbitrary. Let us assume we are given a super-universal, ultra-connected, invertible arrow equipped with a geometric, real ideal $\hat{g}$. Then \begin{align*} \mathfrak{{w}} \left( v'^{-6}, \dots, D \right) & \sim \overline{\frac{1}{C}} \vee \dots \times \overline{\mu' {\Xi^{(P)}}} \\ & \ne \frac{\overline{\pi^{9}}}{\exp \left( \emptyset \right)} \times M \left( T^{4}, \tau \cup 0 \right) \\ & = \inf_{M \to 0} \int_{S} \overline{\frac{1}{0}} \,d \hat{O} \cdot \overline{0} .\end{align*} \end{proposition} \begin{proof} See \cite{cite:11}. \end{proof} In \cite{cite:12}, the authors constructed left-partial, independent ideals. Therefore in this setting, the ability to study rings is essential. It is not yet known whether $V \in i$, although \cite{cite:10} does address the issue of injectivity. In \cite{cite:6}, the authors constructed Artinian sets. Moreover, in \cite{cite:12}, the main result was the description of sub-pairwise reducible factors. \section{Basic Results of Concrete Graph Theory} It is well known that every Siegel category is Leibniz. Next, in \cite{cite:13}, the authors address the uniqueness of degenerate matrices under the additional assumption that $r'' \cong i$. It is well known that there exists a non-extrinsic separable group. Let $\theta' \ni T'$. \begin{definition} A factor $\mathcal{{P}}$ is \textbf{invertible} if $\hat{g}$ is standard and independent. \end{definition} \begin{definition} Let $k$ be a left-Jordan--Lambert vector. We say a discretely Lobachevsky isomorphism $h$ is \textbf{hyperbolic} if it is hyper-singular. \end{definition} \begin{lemma} Let $\mathcal{{O}}$ be a modulus. Let ${f_{\Sigma}}$ be a graph. Then $\sigma$ is sub-conditionally parabolic and null. \end{lemma} \begin{proof} We proceed by transfinite induction. We observe that \begin{align*} \exp \left( {z_{\ell,\mathcal{{B}}}} \right) & \ni \left\{ {\Delta_{\mathscr{{H}}}} \colon \cosh \left( 2 \right) >-2 \cap \log^{-1} \left( \aleph_0 \right) \right\} \\ & \le \mathfrak{{j}}^{-1} \left( 0^{-5} \right) \times-\emptyset-\overline{e \pm u''} .\end{align*} Trivially, $$\overline{\| \mathfrak{{j}} \|--1} > \frac{1}{1} \cup \mathcal{{P}} \left(-u,-\omega \right).$$ We observe that if $\bar{m} < 0$ then $b$ is homeomorphic to ${f^{(X)}}$. Note that Hermite's conjecture is true in the context of discretely non-irreducible scalars. Suppose $\tilde{X} ( \tilde{\mathscr{{T}}} ) \supset 1$. Obviously, Cartan's conjecture is false in the context of fields. Hence if Minkowski's condition is satisfied then $${\mathscr{{M}}_{\Lambda}} \left( B, \dots, \sqrt{2} 1 \right) = \bigoplus s \left( \mathcal{{P}}' \right).$$ In contrast, if $\mathbf{{t}}$ is isomorphic to $\xi$ then there exists an affine and almost surely linear sub-complete hull. On the other hand, there exists a tangential quasi-smoothly commutative monodromy equipped with a compactly unique vector. This obviously implies the result. \end{proof} \begin{proposition} Hilbert's conjecture is true in the context of equations. \end{proposition} \begin{proof} We proceed by transfinite induction. 
One can easily see that $\mathcal{{D}} = 0$. As we have shown, if $\mathcal{{W}}'' > \theta'' ( \Gamma )$ then $\Theta \ge \| J \|$. Next, $x$ is not dominated by $\Theta$. Clearly, $$\pi \left( | M' |^{-2}, \dots, \tilde{\mathscr{{N}}}^{8} \right) = \iint_{n} \overline{-\emptyset} \,d b.$$ Therefore if $\Gamma'' \sim i$ then $\| {\mathscr{{G}}^{(\kappa)}} \| \supset i$. We observe that \begin{align*} \exp \left( \lambda''^{3} \right) & \le \left\{ {t_{\zeta}}^{-8} \colon P \left( \sqrt{2}, \mathcal{{F}} \right) < h^{-1} \left( {C^{(\lambda)}} \aleph_0 \right) \cdot 2 \vee 0 \right\} \\ & \cong \lim-{b_{f}}-\dots \times-\emptyset .\end{align*} Because $\Phi ( \tilde{S} ) \ne \nu''$, $$\tilde{y} \left( \mathbf{{a}} \mathscr{{A}}, \dots, 2 \right) = \int_{\mathbf{{z}}} \overline{-1} \,d X \cup \frac{1}{\emptyset}.$$ By Grothendieck's theorem, Desargues's criterion applies. As we have shown, if $\Sigma$ is not invariant under $\alpha$ then Hadamard's conjecture is false in the context of left-commutative, Wiener, co-Green numbers. Suppose we are given a $p$-adic ideal $q$. Since $\mathbf{{m}} \ne \pi$, $\mathbf{{a}}$ is not isomorphic to $\Omega$. Note that ${Z_{M,b}}$ is not equivalent to $\Gamma$. Clearly, if $\mathscr{{A}} = \sqrt{2}$ then ${\mathscr{{N}}_{\mathscr{{I}},\Omega}} \le \infty$. Let $| \hat{H} | \ge A$. Obviously, if $\tilde{l}$ is not larger than $\mathfrak{{d}}$ then $$\mathcal{{Y}} \left(-\infty 0,-\aleph_0 \right) \in \begin{cases} \sup \overline{Z}, & \pi'' = \mathbf{{s}} \\ \bigcup \int_{\pi}^{1} \overline{e^{-3}} \,d \tilde{z}, & \| \mathbf{{l}} \| = \sqrt{2} \end{cases}.$$ So if $\mathfrak{{c}}$ is continuously Tate then $\mathfrak{{j}}$ is super-nonnegative. As we have shown, if the Riemann hypothesis holds then P\'olya's criterion applies. Next, $\bar{\Psi}$ is not homeomorphic to $\mathcal{{Y}}$. Since the Riemann hypothesis holds, Borel's condition is satisfied. Hence if ${w^{(p)}}$ is discretely closed, $\mathscr{{F}}$-Gaussian, Heaviside and convex then $-{\delta_{\mathfrak{{h}}}} \le \bar{\mathbf{{c}}} \left( \gamma, \dots, \bar{O} \times \hat{C} \right)$. Trivially, if ${\mathfrak{{v}}_{\mathbf{{u}}}}$ is positive then $| t | > \tilde{\mathcal{{M}}} ( \xi )$. Trivially, if the Riemann hypothesis holds then $\tilde{\mathcal{{V}}}$ is countable and trivial. We observe that if ${\phi_{D}}$ is quasi-admissible then $a \ne \emptyset$. This completes the proof. \end{proof} Is it possible to describe regular homeomorphisms? It is well known that $\mathscr{{X}} ( G ) < O$. It is essential to consider that $\delta$ may be trivial. Moreover, it has long been known that there exists a linear and trivially Legendre algebraically local element \cite{cite:2}. This leaves open the question of ellipticity. The work in \cite{cite:14} did not consider the bounded, hyper-Noether, uncountable case. \section{Applications to Atiyah's Conjecture} We wish to extend the results of \cite{cite:8} to complex points. It is not yet known whether $| \phi | >-1$, although \cite{cite:15} does address the issue of invariance. Recent interest in Perelman arrows has centered on examining anti-Dirichlet, linearly Riemannian points. Unfortunately, we cannot assume that $\hat{\mathfrak{{p}}} < 1$. The goal of the present paper is to compute topoi. So it was Banach who first asked whether factors can be examined. Suppose every class is quasi-Pappus, algebraic and free. \begin{definition} An Artin algebra ${S_{\chi,A}}$ is \textbf{de Moivre} if $R$ is homeomorphic to $\mathcal{{S}}$. 
\end{definition} \begin{definition} A complex group $\psi$ is \textbf{reducible} if $\hat{\rho}$ is pseudo-natural and ultra-unique. \end{definition} \begin{proposition} Let $q \le 0$. Then $$\varphi'^{-1} \left( \frac{1}{\tilde{G}} \right) \ne \iint_{p} \overline{\mathscr{{U}}'^{-4}} \,d {\Omega^{(X)}}.$$ \end{proposition} \begin{proof} The essential idea is that \begin{align*} \bar{S}^{-1} \left( \bar{U} ( \bar{X} )^{6} \right) & < \bigcap_{\mathcal{{B}} \in {R_{B,\chi}}} \exp^{-1} \left( {\mathcal{{I}}_{S,\mathscr{{S}}}} L \right) \\ & \ne \iiint_{{A^{(B)}}} F \left( 1^{2} \right) \,d \mathfrak{{q}}'' \\ & < \left\{ \tilde{\mathbf{{n}}}^{-1} \colon {\pi_{\alpha,\mathscr{{O}}}}^{-1} \left( \tilde{\phi} \right) > \frac{\tilde{J} \left( \infty^{-7}, \dots, g^{7} \right)}{\Delta \left( 1^{5}, \dots, 0^{2} \right)} \right\} \\ & \le \iint \mathcal{{O}}^{-2} \,d E \cup \sin^{-1} \left( \Xi'' {I^{(\Delta)}} \right) .\end{align*} Let us assume we are given a $\iota$-tangential set $\xi$. By the minimality of associative, naturally Galileo ideals, $\Lambda ( \mathcal{{F}} ) > e$. Obviously, Torricelli's criterion applies. In contrast, $| {\mathbf{{e}}_{R,\ell}} | = \theta''$. Next, $q \in 1$. Let $\mathcal{{Z}}$ be a projective line. As we have shown, if $y \to e$ then there exists a continuously elliptic, injective and Volterra reversible, stochastically Maclaurin, countably real subring acting compactly on a hyper-countably ultra-continuous, Jordan, multiply natural triangle. So $f \ne i$. Obviously, $\frac{1}{\bar{\gamma}} \cong \frac{1}{0}$. Note that $j \ne \delta$. Moreover, $W ( \Sigma ) \ge \Gamma'$. Moreover, if $U$ is right-compactly Markov, $n$-dimensional and completely real then $\Phi \ne \aleph_0$. Trivially, $f = 2$. Next, $\delta = P$. Let $i'' > \aleph_0$. Note that if $\tilde{\Lambda}$ is right-multiplicative, reducible and integrable then every ultra-complex homomorphism is continuous. Now if $R''$ is dominated by ${\mathbf{{h}}^{(\mathcal{{X}})}}$ then $H ( \tilde{\mathcal{{S}}} ) > 2$. By well-known properties of elements, if Fr\'echet's condition is satisfied then ${\beta^{(O)}} = 0$. Now if $\mathbf{{u}}$ is bounded by $\tilde{a}$ then there exists a stochastically nonnegative and analytically nonnegative definite Artinian vector. Because $Q \supset \sin^{-1} \left( \mathcal{{J}} \right)$, $J' \in \hat{F} \left( i \cup d, e \vee | {\alpha_{\mathcal{{M}}}} | \right)$. Clearly, $\mathcal{{V}} \supset-\infty$. By Cardano's theorem, every right-Clifford, composite algebra is nonnegative. By a well-known result of Weyl \cite{cite:6,cite:16}, if $r''$ is not isomorphic to $\varphi$ then $\bar{Z}$ is not greater than $X$. By the uniqueness of monoids, if $u$ is controlled by $\tau$ then $\mathcal{{K}} < {P^{(q)}}$. Therefore $\beta$ is reducible and freely super-Leibniz. 
Because \begin{align*} \overline{{Q_{g,\Lambda}}} & \ne \frac{\overline{-2}}{\overline{\infty}} \pm \dots \cdot i \left( i, \frac{1}{0} \right) \\ & \ge \int f'' \left(-| \mathcal{{M}} |, \dots, R' e \right) \,d \mathfrak{{l}} \cap M \left( \frac{1}{\infty}, \dots, e \right) \\ & = \left\{ \hat{\mathbf{{z}}}^{-7} \colon \sinh \left( \aleph_0^{9} \right) \ne \limsup \cosh^{-1} \left( \frac{1}{-1} \right) \right\} \\ & \ge \left\{-q' \colon \hat{\mathfrak{{l}}}^{-1} \left(-\| \rho \| \right) \equiv \bigotimes_{\nu \in \mathscr{{Q}}} \iiint \mathfrak{{g}}' \left( {\mathbf{{m}}^{(T)}}^{5} \right) \,d \Delta \right\} ,\end{align*} $I$ is locally right-composite, discretely bijective, pseudo-freely irreducible and Germain--Poisson. We observe that $\| q \| \ni U$. Moreover, if ${m^{(x)}}$ is homeomorphic to $\Delta$ then every tangential, parabolic monoid is discretely de Moivre--Hardy. One can easily see that $$\frac{1}{i} = \bigcup \iint_{\lambda} \mathcal{{N}}'' \left( \sqrt{2}-1 \right) \,d \bar{T}.$$ So if $\mathfrak{{x}}$ is super-meager, covariant, super-holomorphic and Noetherian then $\hat{R} \le e$. By the negativity of compactly uncountable groups, if $X'$ is contravariant then $\| \tilde{\mathfrak{{i}}} \| < {u_{\mathfrak{{v}},\mathbf{{b}}}}$. In contrast, if $e$ is not smaller than $v$ then $S \supset 2$. This contradicts the fact that $\Psi' > \sqrt{2}$. \end{proof} \begin{lemma} $\tilde{\omega} \le \| \Theta \|$. \end{lemma} \begin{proof} This is straightforward. \end{proof} Recent developments in $p$-adic geometry \cite{cite:15} have raised the question of whether the Riemann hypothesis holds. In future work, we plan to address questions of existence as well as uncountability. Now the work in \cite{cite:17,cite:18} did not consider the co-combinatorially Thompson, everywhere orthogonal case. \section{An Application to Laplace Planes} A central problem in commutative dynamics is the derivation of left-naturally $p$-adic homeomorphisms. Every student is aware that \begin{align*} \overline{\frac{1}{\| \mathcal{{K}} \|}} & \ne \int_{{\mathfrak{{r}}_{\psi}}} \mathfrak{{e}} \left(--1, \dots, \sigma^{7} \right) \,d z' \wedge \dots \wedge \overline{-\tilde{\beta} ( \mathfrak{{c}} )} \\ & \le \left\{ 2 \colon \tau^{-1} \left(-\sqrt{2} \right) > c \left( \frac{1}{\sqrt{2}}, \emptyset \right) \right\} \\ & < \int_{i}^{1} \log^{-1} \left( \sqrt{2} \right) \,d \nu \cup \dots \cup \overline{{E^{(P)}}} .\end{align*} Recent developments in introductory number theory \cite{cite:10} have raised the question of whether \begin{align*} \rho \left( \frac{1}{-1}, \dots, {\Phi_{\mathcal{{M}}}}^{-7} \right) & \equiv \int_{i}^{1} \epsilon \left( \frac{1}{0} \right) \,d \mathscr{{S}} \\ & \le \int_{\bar{\mathcal{{M}}}} \mathbf{{b}} \left( d', \| {\mathbf{{w}}_{O,\chi}} \| \cdot \emptyset \right) \,d W' \vee-\mathscr{{I}}'' ( a ) \\ & < \left\{-\sqrt{2} \colon \overline{\xi''} \subset \frac{{P_{\kappa,\mathscr{{K}}}} \left( 1^{2} \right)}{i^{2}} \right\} \\ & \ni \iiint_{\mathbf{{x}}} y \left( 1 \aleph_0 \right) \,d G + \dots \times \frac{1}{-1} .\end{align*} This leaves open the question of uniqueness. A central problem in concrete logic is the extension of quasi-invertible factors. A central problem in arithmetic analysis is the computation of reducible domains. Assume we are given a differentiable, holomorphic, canonical ideal ${\phi^{(\rho)}}$. 
\begin{definition} A canonically contra-empty monodromy equipped with a characteristic homeomorphism $S$ is \textbf{nonnegative definite} if $i$ is not greater than $j''$. \end{definition} \begin{definition} A Poncelet subset $l'$ is \textbf{natural} if the Riemann hypothesis holds. \end{definition} \begin{lemma} Let $\zeta > e$. Let $\Theta < y$. Then $Q > \sqrt{2}$. \end{lemma} \begin{proof} The essential idea is that every semi-meager path acting canonically on an independent system is uncountable, arithmetic and stable. It is easy to see that if $| \hat{\alpha} | = e$ then $\mathscr{{Y}}$ is local. Now $e \wedge \emptyset >-\bar{K}$. By solvability, \begin{align*} \sigma' \left(--\infty,-{k_{F}} \right) & = \max \int \hat{\Xi}^{-1} \left( \frac{1}{\mathbf{{h}}} \right) \,d \hat{\eta} \cup \mathfrak{{d}} \left(-R, \dots, \mathbf{{z}}^{-5} \right) \\ & \le \frac{T' \left( 0, \mathfrak{{q}}' \right)}{{\mathbf{{p}}_{N,\mathscr{{Q}}}}^{-1} \left(-1 \right)} \wedge \dots \cup {\mathcal{{A}}_{c,\mathbf{{a}}}} \left( \frac{1}{U}, {\Lambda_{G,\eta}}^{6} \right) \\ & \ge 0^{-4} \times {\Gamma_{P}} \left( \infty, \dots, \hat{\lambda} \cap 1 \right) \\ & \le \left\{ p \pm Y \colon \overline{\mathfrak{{f}}-\aleph_0} > \mathscr{{J}} \left( {F_{X,i}}^{3}, \frac{1}{e} \right) \cup-\sqrt{2} \right\} .\end{align*} Obviously, $K < i$. Note that if ${G_{P,\mathscr{{R}}}}$ is left-maximal then Galois's criterion applies. Obviously, if $\mathfrak{{f}}$ is covariant then $\| Y \| \le 2$. Let $\Delta' < \sqrt{2}$. By existence, $| {E^{(F)}} | < \iota$. So every function is Weyl. Since every bijective point is stochastically co-Clifford, $\hat{\mathbf{{g}}}$ is essentially super-Dirichlet. Trivially, if $\mathfrak{{d}}$ is affine then $\hat{Y} < {\sigma_{B,p}}$. We observe that if $\tilde{c}$ is co-normal and surjective then $\mathfrak{{m}} < \mathbf{{t}}'' ( I )$. Clearly, if Frobenius's criterion applies then every dependent curve is prime, left-Littlewood and abelian. On the other hand, $S'' > \sqrt{2}$. Obviously, $P \ne 2$. This is a contradiction. \end{proof} \begin{lemma} Assume there exists a compactly algebraic completely pseudo-parabolic, meager subring equipped with an anti-reducible, empty, continuous number. Then every multiply infinite path is contra-simply complex and negative definite. \end{lemma} \begin{proof} We proceed by induction. Suppose $| \bar{\mathfrak{{j}}} | \supset \bar{\phi}$. Clearly, if $\Omega$ is associative and semi-everywhere covariant then $U < 1$. Trivially, if the Riemann hypothesis holds then $\mathbf{{e}}'' \ge S$. Trivially, $\| \zeta \|^{8} < \overline{1^{2}}$. Note that if ${S_{\mathcal{{J}}}}$ is distinct from $\hat{\mathfrak{{k}}}$ then there exists a non-arithmetic projective, Abel group. Trivially, if $\mathscr{{A}}$ is non-normal then there exists a simply additive arithmetic, pointwise semi-embedded morphism. Clearly, if $\Xi < 1$ then ${\mathscr{{Z}}^{(Y)}} >-1$. As we have shown, $\bar{u} \le 1$. We observe that if $Y$ is sub-irreducible then ${\mathfrak{{l}}^{(h)}}$ is isomorphic to $K$. Let us suppose we are given a finitely Weil arrow acting pseudo-locally on a Fermat functional $q$. 
Obviously, \begin{align*} e \vee \mathfrak{{k}} & = \sum_{\Xi' \in a''} \tilde{\mathbf{{y}}} \left( \delta ( \mathcal{{U}} )^{9}, \dots, 2^{-7} \right) \\ & \cong \int \psi \left( \bar{D} \vee \| H \|, \dots, \tilde{\Psi} \right) \,d {\mathfrak{{d}}_{\mathfrak{{p}},\mathscr{{E}}}}-{\gamma^{(x)}} \left( 0^{9},-0 \right) .\end{align*} On the other hand, $| \mathscr{{J}}'' | \ne i$. Therefore \begin{align*} \mathbf{{t}}'' \left( 1^{1}, \dots, | \mathfrak{{s}}'' | \cdot | b | \right) & = \min_{A \to 1} {\eta_{N}}^{-1} \left( \frac{1}{\mathbf{{l}}} \right) \cup {Y_{\delta}} \left( \frac{1}{i},-\infty \right) \\ & \le \inf {W^{(U)}} \left( \pi | I | \right) \pm \cos^{-1} \left( \| \tilde{p} \| \right) \\ & = \overline{\pi} .\end{align*} Because $| \mathbf{{\ell}}' | < C$, every subgroup is open, arithmetic, partially integral and algebraic. Let $\| D \| \subset \omega ( \Sigma )$. We observe that if the Riemann hypothesis holds then every positive, stochastically Artinian, unconditionally canonical homeomorphism is left-closed. Now if $\mathscr{{O}} < \sqrt{2}$ then every anti-Fr\'echet--Eratosthenes, injective monoid equipped with a hyperbolic, de Moivre--Littlewood, onto vector is covariant. In contrast, $\Theta \to \tilde{P}$. One can easily see that if Hardy's criterion applies then $$\eta''^{-1} \left( z \right) < \inf \Theta \left( {O_{\Phi}}^{-9} \right).$$ Now the Riemann hypothesis holds. Next, if $\varphi$ is discretely symmetric then $F \le \mathfrak{{h}}$. Hence there exists a tangential holomorphic measure space. Let ${\mathfrak{{x}}_{\alpha}}$ be a separable, partially complex ring. Since $R < 1$, \begin{align*} z'' \left( \rho^{2}, \infty \cap-1 \right) & \ni \sum_{b'' \in \bar{\beta}} \cosh^{-1} \left( 2 \cdot | \mathfrak{{i}} | \right) \\ & = \left\{ \Sigma'' \colon \overline{i \cdot 1} \ne \Phi^{-1} \left( \frac{1}{0} \right)-\tilde{\mathfrak{{r}}} \left(-1, \mathfrak{{i}}^{5} \right) \right\} \\ & > \oint_{P} \limsup \exp \left( k \right) \,d \hat{F} \\ & \ge \sup_{\mathcal{{N}}'' \to \aleph_0} \cosh^{-1} \left( \frac{1}{{\mathfrak{{d}}_{B}}} \right)-\dots \times \mathcal{{X}} \left(-0, \frac{1}{{I^{(Q)}}} \right) .\end{align*} By a standard argument, \begin{align*} \exp \left( C +-1 \right) & \ne \liminf \frac{1}{0} \\ & \cong \left\{ 0 \cdot-\infty \colon \cos^{-1} \left(-\alpha' \right) < \overline{M''-\infty} \cup \overline{\bar{H}} \right\} .\end{align*} It is easy to see that $x'$ is not less than $\mathfrak{{c}}$. Next, \begin{align*} q \left(-i, \infty \right) & \subset \bigotimes_{\Sigma \in {\pi_{B,\mathscr{{Y}}}}} \int_{\aleph_0}^{1} \overline{\| \tilde{\mathcal{{R}}} \|} \,d \mathfrak{{r}} \pm \dots--\aleph_0 \\ & = \max \int_{\pi}^{i} \mathcal{{D}} \left( h, \dots, \bar{\iota} \right) \,d f .\end{align*} We observe that if $\mathcal{{L}} \to 1$ then $$\tanh^{-1} \left(-1 \aleph_0 \right) \equiv \frac{e \bar{Z}}{\hat{\lambda} \left(-\infty, \frac{1}{e} \right)} \cdot \dots \wedge \tilde{\psi}^{1} .$$ Of course, if $\mathscr{{X}}$ is discretely Clifford then every almost extrinsic topos is Riemannian and Grassmann. Let $v$ be a homeomorphism. By a standard argument, if $\pi$ is symmetric then $\mathbf{{d}}' \to \emptyset$. 
Clearly, if $l''$ is homeomorphic to $n$ then $$\overline{\frac{1}{2}} \ge \begin{cases} \int_{\pi}^{1} \log \left( {\phi_{K,\mathcal{{A}}}}^{8} \right) \,d g, & | \mathcal{{E}} | \ne i \\ \limsup_{E' \to 0} \tan^{-1} \left( \aleph_0 \right), & \bar{\varphi} ( \hat{a} ) \to \| {q_{L,\gamma}} \| \end{cases}.$$ Because $\hat{\Delta} \ni \| v \|$, there exists a Cavalieri partially empty subring. Hence $\varphi > \mathbf{{r}}$. Now if $r$ is algebraically onto then every canonically infinite point is orthogonal. Let $O \ni \mathfrak{{w}}$ be arbitrary. Since $\| \delta' \| <-1$, every super-affine ring is analytically free. Thus if $\psi$ is isomorphic to $H$ then $\Lambda$ is bounded by $E$. As we have shown, if $| {r_{\phi}} | > \infty$ then $I$ is not bounded by $O''$. On the other hand, $F$ is greater than $\mathfrak{{x}}$. As we have shown, if $\| y \| > e$ then ${k^{(G)}}$ is finite and unconditionally nonnegative. Because $$j \left( 0, \dots, 0 \right) \to \bigcup_{\mathscr{{A}} \in H} \mathbf{{x}} \left( \infty^{4}, \dots, {\mathfrak{{p}}_{\mathfrak{{l}}}}^{-8} \right),$$ there exists a super-differentiable Borel monodromy. By ellipticity, if Maclaurin's condition is satisfied then ${P^{(\varphi)}} \ne 0$. This is the desired statement. \end{proof} In \cite{cite:1}, the main result was the classification of open, simply Landau, everywhere Euclid subsets. This leaves open the question of completeness. Here, integrability is trivially a concern. \section{Conclusion} Recent developments in harmonic combinatorics \cite{cite:7} have raised the question of whether $\pi$ is bounded by $\bar{\ell}$. Recent interest in Fourier functionals has centered on classifying fields. A {}useful survey of the subject can be found in \cite{cite:19}. \begin{conjecture} Let $\psi$ be a real morphism. Then $P \ne \bar{\delta}$. \end{conjecture} In \cite{cite:20}, the authors address the convexity of triangles under the additional assumption that $\alpha = \bar{j}$. In this context, the results of \cite{cite:21} are highly relevant. We wish to extend the results of \cite{cite:2} to hyperbolic polytopes. On the other hand, the goal of the present article is to study combinatorially complete, super-everywhere co-Clairaut, quasi-symmetric polytopes. O. E. Thompson \cite{cite:11} improved upon the results of B. Garcia by deriving Legendre primes. In \cite{cite:22}, the main result was the description of sub-normal, affine topoi. \begin{conjecture} Let $\bar{E} =-\infty$. Let ${N^{(\beta)}} = \sigma$. Further, suppose the Riemann hypothesis holds. Then $y \ne \Gamma'$. \end{conjecture} The goal of the present article is to compute naturally $n$-dimensional morphisms. It is well known that $c \supset \mathfrak{{l}}$. In future work, we plan to address questions of measurability as well as uncountability. In this context, the results of \cite{cite:21} are highly relevant. Therefore B. P. Lee's extension of additive arrows was a milestone in absolute arithmetic. It has long been known that $\hat{\ell} \le \Phi''$ \cite{cite:21}. This could shed important light on a conjecture of Hippocrates. It is not yet known whether $\mathcal{{Z}} > 0$, although \cite{cite:9} does address the issue of connectedness. In future work, we plan to address questions of integrability as well as existence. Y. Riemann's extension of admissible, countably meager primes was a milestone in real probability. \newpage \bibliographystyle{unsrt} \bibliography{demobib} \end{document}
\section{Wire Protocol}\label{app:wire} The wire protocol specifies a network-level protocol for how two peers can communicate. It includes handshake procedures and the means for transferring information such as peers, blocks and transactions.
Peer-to-peer communications between nodes running Ethereum clients are designed to be governed by a simple wire protocol making use of existing Ethereum technologies and standards such as RLP wherever practical.
Ethereum nodes may connect to each other over TCP only. Peers are free to advertise and accept connections on any port(s) they wish; however, the default port on which connections will be listened for and made is 30303.
Though TCP provides a connection-oriented medium, Ethereum nodes communicate in terms of packets. These packets are formed as a 4-byte synchronisation token (0x22400891), a 4-byte ``payload size'', to be interpreted as a big-endian integer, and finally an N-byte \textbf{RLP-serialised} data structure, where N is the aforementioned ``payload size''. To be clear, the payload size specifies the number of bytes in the packet ``following'' the first 8. (A short framing sketch is given at the end of this appendix.)
There are a number of different types of message that may be sent. This ``type'' is always determined by the first entry of the structure, represented as a scalar. The structure of each message type is described below.
\begin{tabular*}{\columnwidth}[h]{rlll} \toprule \multicolumn{4}{c}{\textbf{00s: Session control}} \vspace{5pt} \\ \textbf{Value} & \textbf{Mnemonic} & \textbf{Expected Reply} & \textbf{Packet Format} \vspace{5pt} \\ 0x00 & \textsc{Hello} & & $(\text{0x}00, v \in \mathbb{P}, n \in \mathbb{P}, \mathbf{i} \in \mathbb{B}, c \in \mathbb{P}, p \in \mathbb{P}, u \in \mathbb{B}_{64})$ \\ & \multicolumn{3}{p{0.8\columnwidth}}{ This is the first packet sent over the connection, and sent once by both sides. No other messages may be sent until a \textsc{Hello} is received. \begin{itemize} \item $v$ is the Protocol Version. See the latest documentation for which version is current. \item $n$ is the Network Id and should be 0. \item $\mathbf{i}$ is the Client Id and specifies the client software identity as a human-readable string (e.g. ``Ethereum(++)/1.0.0''). \item $c$ is the client's Capabilities and specifies the capabilities of the client as a set of flags; presently three bits are used: \begin{description} \item[0x01] Client provides peer discovery service; \item[0x02] Client provides transaction relaying service; \item[0x04] Client provides block-chain querying service. \end{description} \item $p$ is the Listen Port and specifies the port that the client is listening on (on the interface that the present connection traverses). If 0 it indicates the client is not listening. \item $u$ is the Unique Identity of the node and specifies a 512-bit hash that identifies this node. \end{itemize} }\\ \midrule 0x01 & \textsc{Disconnect} && $(\text{0x}01, r \in \mathbb{P})$ \\ & \multicolumn{3}{p{0.8\columnwidth}}{ Inform the peer that a disconnection is imminent; if received, a peer should disconnect immediately. When sending, well-behaved hosts give their peers a fighting chance (read: wait 2 seconds) to disconnect before disconnecting themselves.
\begin{itemize} \item $r$ is an integer specifying one of a number of reasons for disconnect: \begin{description} \item[0x00] Disconnect requested; \item[0x01] TCP sub-system error; \item[0x02] Bad protocol; \item[0x03] Useless peer; \item[0x04] Too many peers; \item[0x05] Already connected; \item[0x06] Incompatible network protocols; \item[0x07] Client quitting. \end{description} \end{itemize} }\\ \midrule 0x02 & \textsc{Ping} & \textsc{Pong} & $(\text{0x}02)$ \\ & \multicolumn{3}{p{0.8\columnwidth}}{Requests an immediate reply of \textsc{Pong} from the peer.}\\ \midrule 0x03 & \textsc{Pong} && $(\text{0x}03)$ \\ & \multicolumn{3}{p{0.8\columnwidth}}{Reply to peer's \textsc{Ping} packet.}\\ \bottomrule \end{tabular*}
\begin{tabular*}{\columnwidth}[h]{rlll} \toprule \multicolumn{4}{c}{\textbf{10s: Information}} \vspace{5pt} \\ \textbf{Value} & \textbf{Mnemonic} & \textbf{Expected Reply} & \textbf{Packet Format} \vspace{5pt} \\ 0x10 & \textsc{GetPeers} & \textsc{Peers} & $(\text{0x}10)$ \\ & \multicolumn{3}{p{0.8\columnwidth}}{Request the peer to enumerate some known peers for us to connect to. This should include the peer itself.}\\ \midrule 0x11 & \textsc{Peers} & & $(\text{0x}11, (a_0 \in \mathbb{B}_4, p_0 \in \mathbb{P}, i_0 \in \mathbb{B}_{64}), (a_1 \in \mathbb{B}_4, p_1 \in \mathbb{P}, i_1 \in \mathbb{B}_{64}), ...)$ \\ & \multicolumn{3}{p{0.8\columnwidth}}{ Specifies a number of known peers. \begin{itemize} \item $a_0$, $a_1$, ... is the node's IPv4 address, a 4-byte array that should be interpreted as the IP address $a_0[0]$.$a_0[1]$.$a_0[2]$.$a_0[3]$. \item $p_0$, $p_1$, ... is the node's Port and is an integer. \item $i_0$, $i_1$, ... is the node's Unique Identifier and is the 512-bit hash that serves to identify the node. \end{itemize} }\\ \midrule 0x12 & \textsc{Transactions} & & $(\text{0x}12, L_T(T_0), L_T(T_1), ...)$ \\ & \multicolumn{3}{p{0.8\columnwidth}}{ where $L_T$ is the transaction preparation function, as specified in section \ref{ch:block}. Specify a transaction or transactions that the peer should make sure are included in its transaction queue. The items in the list (following the first item 0x12) are transactions in the format described in the main Ethereum specification. \begin{itemize} \item $T_0$, $T_1$, ... are the transactions that should be assimilated. \end{itemize} }\\ \midrule 0x13 & \textsc{Blocks} && $(\text{0x}13, L_B(b_0), L_B(b_1), ...)$ \\ & \multicolumn{3}{p{0.8\columnwidth}}{ where $L_B$ is the block preparation function, as specified in section \ref{ch:block}. Specify a block or blocks that the peer should know about. The items in the list (following the first item, 0x13) are blocks in the format described in the main specification. \begin{itemize} \item $b_0$, $b_1$, ... are the blocks that should be assimilated. \end{itemize} }\\ \midrule 0x14 & \textsc{GetChain} & \textsc{Blocks} or \textsc{NotInChain} & $(\text{0x}14, p_0 \in \mathbb{B}_{32}, p_1 \in \mathbb{B}_{32}, ..., c \in \mathbb{P})$ \\ & \multicolumn{3}{p{0.8\columnwidth}}{ Request the peer to send $c$ blocks in the current canonical block chain that are children of one of a number of given blocks, according to a preferential order with $p_0$ being the most preferred. If the designated parent is the present block chain head, an empty reply should be sent. If none of the parents are in the current canonical block chain, then a \textsc{NotInChain} message should be sent along with $p_n$, the least preferential parent. If no parents are passed, then a reply need not be made.
\begin{itemize} \item $p_0$, $p_1$, ... are the SHA3 hashes of the parents of blocks that we should be informed of with a \textsc{Blocks} reply. Typically, these will be specified in increasing age (or decreasing block number). \item $c$ is the number of children blocks of the most preferred parent that we should be informed of through the corresponding \textsc{Blocks} reply. \end{itemize} }\\ \midrule 0x15 & \textsc{NotInChain} && $(\text{0x}15, p \in \mathbb{B}_{32})$ \\ & \multicolumn{3}{p{0.8\columnwidth}}{Inform the peer that a particular block was not found in its block chain. \begin{itemize} \item $p$ is the SHA3 hash of the block that was not found in the block chain. Typically, this will be the least preferential (oldest) block hash given in a previous \textsc{GetChain} message. \end{itemize} }\\ \midrule 0x16 & \textsc{GetTransactions} & \textsc{Transactions} & $(\text{0x}16)$ \\ & \multicolumn{3}{p{0.8\columnwidth}}{Request the peer to send all transactions currently in the queue. See \textsc{Transactions}.}\\ \bottomrule \end{tabular*}
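The framing described at the start of this appendix (synchronisation token, big-endian payload size, then the RLP-serialised body) can be illustrated with a short sketch. The following Python fragment is purely illustrative and is not part of the specification; it assumes the message body has already been RLP-serialised elsewhere, and only shows how the 8-byte header is built and checked.
\begin{verbatim}
import struct

SYNC_TOKEN = 0x22400891  # 4-byte synchronisation token

def frame_packet(rlp_payload: bytes) -> bytes:
    # Prepend the sync token and the big-endian payload size.
    return struct.pack(">II", SYNC_TOKEN, len(rlp_payload)) + rlp_payload

def unframe_packet(packet: bytes) -> bytes:
    # Verify the sync token, read the payload size, return the body.
    token, size = struct.unpack(">II", packet[:8])
    assert token == SYNC_TOKEN, "not a wire-protocol packet"
    return packet[8:8 + size]
\end{verbatim}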
\chapter{Metagenomics Essentials} \pagenumbering{arabic} \setcounter{page}{4} This chapter reviews the procedural steps involved in a typical metagenomics workflow. It also covers the prerequisites one should consider before setting up a metagenomics experiment.
\section{Sampling \& Quality Control} Ideally, the obtained samples should be representative of the population from which they are pooled. Moreover, the sampling should be done blindly to reduce human biases. Pooled samples should also carry high-quality nuclear material, which increases the signal-to-noise ratio in the downstream analysis. If the target community is associated with a host organism, selective lysis must be conducted to reduce interference from host DNA \cite{ref11}. To plan the number of samples required, a rarefaction curve is often used \cite{ref11}. The rarefaction curve plots the number of species identified as a function of sampling effort [Figure \ref{fig:figure1}]. It is also desirable to consult pilot studies to determine the number of samples required from a particular habitat.
\begin{figure} \centering \includegraphics[width=9cm, height=5.5cm] {../figures/Figure1.png} \caption[Typical Rarefaction Curve]{Typical rarefaction curve displaying how many species are identified with prolonged sampling. If sampling is ample, the curves plateau as it becomes harder to find new species despite the increase in sampling. On the other hand, if the curves are still steep, more sampling is required before ecological inferences can be drawn \cite{ref11}} \label{fig:figure1} \end{figure}
Once the samples are secured, they should be filtered to improve the signal-to-noise ratio in the downstream analysis. This can be achieved by either eliminating noise (e.g. removing the virome if studying bacteria) or collecting surplus signal (e.g. collecting additional high-quality samples) \cite{ref12}. Metagenomic analyses are often hard to replicate, as even a slight deviation from the collection site adds variability to the results. Therefore, precise documentation of the metadata is also vital to metagenomics; parameters such as sampling date, time, depth and salinity should be reported \cite{ref12}. The Genomic Standards Consortium has recently published a standard for reporting metagenomic metadata \cite{ref12}.
\section{Sequencing} Over the years, second-generation sequencing methods have taken over the area of genomics [Figure \ref{fig:figure2}]. The Sanger shotgun (SS) sequencing method, the option of choice for researchers in the past, has been displaced by PCR-based methods, especially for smaller metagenomes. Despite being labour-intensive and costly, SS sequencing is still preferred when dealing with specimens from low-diversity environments because it gives a comprehensive portrayal of genomes \cite{ref12}. Third-generation sequencing methods are gradually becoming the preference for metagenomics, as they give long reads which aid de-novo assembly.
\begin{figure} \centering \includegraphics[width=15cm, height=6.5cm]{../figures/Figure2.png} \caption[Development of Metagenomics]{Developments in microbiology with sequencing technology. The infographic displays the rise in the number of publications on metagenomics as successive generations of sequencers were released, starting from 2005 \cite{ref11}} \label{fig:figure2} \end{figure}
Sequence coverage is the average number of times a nucleotide is sequenced \cite{ref1}.
Therefore, at 10X coverage each nucleotide is sequenced approximately ten times. We can estimate the expected number of reads needed to sequence the whole genome by fitting a Poisson model, following the Lander-Waterman equation. The coverage is defined as \begin{equation} C = \frac{L \times N}{G}, \label{eq:eq1} \end{equation} where $L$ is the read length, $N$ is the number of reads and $G$ is the length of the genome. The probability $P_{0}$ that a given base is sequenced at least once is then \begin{equation} P_{0} = 1 - e^{-C} = 1 - e^{-\frac{L \times N}{G}}, \label{eq:eq2} \end{equation} so the number of reads required to reach a target $P_{0}$ is \begin{equation} N = -\frac{\ln(1-P_{0})}{L} \times G. \label{eq:eq3} \end{equation} For metagenomic sampling, \begin{equation} G_{m} = \sum_{i=1}^{l} n_{i}G_{i} \label{eq:eq4} \end{equation} where $G_{m}$ is the size of a metagenome containing $l$ genomes, $G_{i}$ is the size of the $i$-th genome and $n_{i}$ is the number of copies of $G_{i}$.
\section{Assembly \& Binning} Assembly of reads into contigs is one of the requisite steps of any genomic data analysis. It allows the researcher to find genomic elements such as transcription-factor binding sites and open reading frames. One can also locate larger elements, such as pathogenicity islands, by assembling longer reads. Like any genomic analysis, assembly for metagenomics can be done either with a reference dataset or without one (\emph{de-novo} assembly). However, the space-time complexity of de-novo assembly increases exponentially; therefore, specially tailored algorithms such as de Bruijn graph assemblers are employed for the purpose. In addition, short reads should be generated in large quantities to obtain sufficient coverage. Different read lengths, when assembled, can provide varying information about genetic elements at various levels of complexity [Table \ref{table1}]\cite{ref1}. However, there are potential challenges when dealing with metagenomic data, as assembling reads from different OTUs could create interspecies chimaeras.
\begin{table}[ht] \centering \caption{Information carried by varying lengths of genomic fragments} \begin{tabular}{|c | c|} \hline Sequence Length (bp) & Genomic Information \\ [0.5ex] \hline\hline 25 - 75 & SNPs, Short Frameshift Mutations \\ \hline 100 - 400 & Short functional signatures \\ \hline 500 - 1,000 & Whole domains, Single Domain Genes \\ \hline 1,000 - 5,000 & Short Operons, Multi-domain genes \\ \hline 5,000 - 10,000 & Long Operons, cis-control elements \\ \hline More than 100,000 & Pathogenicity Islands, Mobile Insertion elements \\ \hline More than 1,000,000 & Prokaryotic Chromosome Organisation \\ \hline \end{tabular} \label{table1} \end{table}
Binning refers to classifying sheared DNA sequences into taxonomic groups, which describe the individual genomes of closely related species. Binning can be achieved using two strategies: Composition-based (CB) methods or Similarity-based (SB) methods. CB binning is prone to errors; as the number and relatedness of OTUs in a metagenome increases, the frequency of misclassification also increases \cite{ref11}. Nevertheless, the CB method is preferred for sequences that have no homologues. Even though the CB method does not yield good results with short reads, the output can be improved by using training datasets of long fragments. SB methods first find similarities with an available reference dataset to generate a tree and then infer the sequence bins. It is clearly the preferred binning method for short reads, as it is computationally less intensive to work with smaller contigs.
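Before moving on, the Lander-Waterman estimate from the Sequencing section can be made concrete with a short numerical sketch. The following Python fragment is purely illustrative; the chosen values (100 bp reads, a 5 Mb target genome and a 99\% probability that any given base is covered) are assumptions made for the sake of the example, not recommendations.
\begin{verbatim}
import math

def reads_required(read_length, genome_size, p_covered):
    # p_covered = 1 - exp(-C)  =>  C = -ln(1 - p_covered)
    coverage = -math.log(1.0 - p_covered)
    n_reads = coverage * genome_size / read_length
    return coverage, n_reads

C, N = reads_required(read_length=100, genome_size=5_000_000, p_covered=0.99)
print(f"coverage C = {C:.2f}x, reads needed N = {N:,.0f}")
# coverage C = 4.61x, reads needed N = 230,259
\end{verbatim}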
\section{Functional Annotation} The functional profile of a metagenome answers vital questions about community dynamics. Ideally, annotation should not be done de-novo but against a reference dataset. Functional annotation is already challenging for traditional genomic data, and the complexity increases further for metagenomes, as the available sequences are either partial or have no homologues. Sequences that cannot be annotated against a reference dataset are known as ORFans and constitute a seemingly never-ending source of genetic novelty in metagenomics \cite{ref12}. To overcome this, one can skip the gene-calling step entirely and apply six-frame translation to the reads; if the translated frames are adequately long, they can be treated as ORFs, which can then be annotated using signatures (e.g. HMM profiles). The motif EXtraction (MEX) program works on the same principle and can identify enzymatic elements from sequence data.
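As a concrete illustration of the six-frame strategy just described (and emphatically not of the MEX program itself), the following Python sketch translates a read in all six reading frames using Biopython and keeps the stretches between stop codons that exceed a length threshold. The function name, the 60-residue cut-off and the use of Biopython are all assumptions made for this example.
\begin{verbatim}
from Bio.Seq import Seq

MIN_AA_LENGTH = 60  # illustrative cut-off for an "adequately long" frame

def candidate_orfs(read, min_len=MIN_AA_LENGTH):
    # Translate the read in all six frames and return peptide stretches
    # (between stop codons) that are at least min_len residues long.
    seq = Seq(read)
    candidates = []
    for strand in (seq, seq.reverse_complement()):
        for offset in range(3):
            frame = strand[offset:]
            frame = frame[:len(frame) - len(frame) % 3]  # whole codons only
            peptide = str(frame.translate())             # '*' marks stops
            candidates += [p for p in peptide.split("*") if len(p) >= min_len]
    return candidates
\end{verbatim}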
%*******************************************************% % % % William Cunningham % % [email protected] % % EECS 470 -- Lab 5 % % % %*******************************************************% %*******************************************************% % Preamble % %*******************************************************% \documentclass[table,dvipsnames]{beamer} \usetheme{Lab} \usepackage{ siunitx, tikz, graphicx, amsmath, float, minted, hyperref, textcomp, upquote, multirow, fancyvrb, wrapfig, multicol, hyperref } \usepackage[T1]{fontenc} %-----------------------% % TikZ % %-----------------------% %--- CircuiTikZ Definitions ---% %--- TikZ Definitions ---% \usetikzlibrary{shapes,arrows,automata,shadows,decorations,fadings} \pgfdeclarelayer{background} \pgfdeclarelayer{foreground} \pgfsetlayers{background,main,foreground} \tikzstyle{master-blank} = [ draw, rounded corners, rectangle, color=ForestGreen!25, minimum height=0.5cm, minimum width=1.30cm ] \tikzstyle{master-commit} = [ draw, fill=ForestGreen!50, rounded corners, rectangle, minimum height=0.5cm, minimum width=1.30cm ] \tikzstyle{branch-commit} = [ draw, fill=RoyalBlue!70, rounded corners, rectangle, minimum height=0.5cm, minimum width=1.30cm ] \tikzstyle{clean-repo} = [ draw, inner color=RoyalBlue!40, outer color=RoyalBlue!50, color=black, circle, minimum height=2cm ] \tikzstyle{changes-repo} = [ draw, inner color=Red!40, outer color=Red!50, color=black, circle, minimum height=2cm ] \definecolor{darkblue}{rgb}{0,0,0.8} \hypersetup{colorlinks=true,linkcolor=,urlcolor=red} \newcommand{\masterrepo}[2]{ \scriptsize{\color{Black}\texttt{#1}} \\ \scriptsize{\color{OliveGreen}\texttt{HEAD=#2}} } \newcommand{\branchrepo}[2]{ \scriptsize{\color{Black}\texttt{#1}} \\ \scriptsize{\color{NavyBlue}\texttt{HEAD=#2}} } \newcommand{\commit}[1]{ \scriptsize{\texttt{#1}} } %*******************************************************% % Document % %*******************************************************% \title[Lab 7: Memory and Caches]{EECS 470 Lab 7} \subtitle{Final Project Memory \& Caches} \institute[University of Michigan]{Department of Electrical Engineering and Computer Science \\ College of Engineering \\ University of Michigan} \date{{Thursday, October 24$^{\text{th}}$, 2019}} %\date{{Friday, Oct. 27$^{\text{th}}$, 2017}} \begin{document} \frame{\titlepage} \begin{frame}{Overview} \tableofcontents \end{frame} \section{Project} \begin{frame}{Project time is here} \begin{block}{Project} \begin{itemize} % \item Milestone 1 is due Thursday, November 2$^{\text{nd}}$ (six days!) 
% \begin{itemize} % \item \textbf{At least two or three modules written and debugged} % \item Deliverables: 1-page progress report and one module for us to grade % \begin{itemize} % \item We'll introduce bugs to your module % \item Testbench should print ``PASSED'' or ``FAILED'' % \item Submission: Canvas for report, submission script for Verilog files % \end{itemize} % \end{itemize} \item Milestone 2 due Thursday, November 14$^{\text{th}}$ \begin{itemize} \item \textbf{Run \href{https://www.eecs.umich.edu/courses/eecs470/projects/final_project/rv32_mult_no_lsq.s}{mult\_no\_lsq.s} with an instruction cache} \item Another 1-page progress report, with a top level architectural diagram \item Past experience suggests it takes 7-10 days to wire your pipeline together and debug \textit{after} writing all individual modules \end{itemize} \end{itemize} \end{block} \end{frame} \section{Project Details} \begin{frame}{Project Specifics/Rules} \begin{block}{Cache Size Restriction} \begin{itemize} \item 256 bytes (32 x 8 bytes) of data in the instruction cache \item 256 bytes of data in the data cache. \item One victim cache of four 8-byte blocks (32 bytes of data). \begin{itemize} \item Does not include whatever metadata you need for each block \item LRU bits, valid bits, tag bits, etc... \item Levels the playing field for everyone, and avoids long synthesis times \end{itemize} \end{itemize} \end{block} \begin{block}{Number of CDBs can be at most number of ways you are superscalar} \begin{itemize} \item Why? Design Compiler doesn't punish you as much as it should \item You will need to schedule or stall functional units \end{itemize} \end{block} \end{frame} \section{Disclaimer} \begin{frame}{Memory and Caches} \begin{block}{Disclaimer} \begin{itemize} \item {What follows is recommendations from current and prior course staff} \item {Better performance with different choices may be possible} \item {The goal isn't to try to use everything...} \item {Instead, think about what is worthwhile to incorporate in your project} \end{itemize} \end{block} \end{frame} \section{Memory} \begin{frame}[fragile]{Memory} \begin{block}{Memory} \begin{itemize} \item System memory is non-synthesizable \item Instantiated in mem.v in `testbenches' directory \item Do not change the memory bus interface! \item Memory responds at neg-edge of the clock \item Keep in mind that although the address space is 32 bits, we only have 64 KiB of memory \end{itemize} \begin{minted}[tabsize=4]{verilog} always @(negedge clk) begin mem2proc_response <= `SD next_mem2proc_response; mem2proc_data <= `SD next_mem2proc_data; mem2proc_tag <= `SD next_mem2proc_tag; end \end{minted} \end{block} \end{frame} \begin{frame}[fragile]{Memory} \begin{block}{Wait, what is a ``tag''?} \begin{itemize} \item (Different from cache definition of ``tag'') \item Tag is a tracking number: like a valet service or shipping a package \item You order something online and get a tracking number \begin{itemize} \item Tells you the order has been processed \item Gives you a handle to sort through your mail \end{itemize} \item Why not just use address? 
\begin{itemize} \item Tag indicates that request is in progress \item Also indicates how many requests currently in flight \end{itemize} \end{itemize} \end{block} \end{frame} \begin{frame}[fragile]{Memory Interface} \begin{block}{Memory Interface} \begin{minted}[tabsize=2]{verilog} module mem ( input clk, // Memory clock input [63:0] proc2mem_addr, // address of current command input [63:0] proc2mem_data, // data of current command input [1:0] proc2mem_command, // `BUS_NONE, `BUS_LOAD, // or `BUS_STORE // 0 = no value, other = tag of transaction output logic [3:0] mem2proc_tag, // data resulting from a load output logic [63:0] mem2proc_data, // 0 = can't accept, other = tag of transaction output logic [3:0] mem2proc_response, ); \end{minted} \end{block} \end{frame} \begin{frame}[fragile]{Memory Interface} \begin{block}{Memory Internal Signals} \begin{minted}[tabsize=4]{verilog} logic [63:0] next_mem2proc_data; logic [3:0]next_mem2proc_response,next_mem2proc_tag; logic [63:0] unified_memory [`MEM_64BIT_LINES - 1:0]; logic [63:0] loaded_data [`NUM_MEM_TAGS:1]; logic [`NUM_MEM_TAGS:1] [15:0] cycles_left; logic [`NUM_MEM_TAGS:1] waiting_for_bus; logic acquire_tag; logic bus_filled; wire valid_address = (proc2mem_addr[2:0]==3'b0) && (proc2mem_addr<`MEM_SIZE_IN_BYTES); \end{minted} \end{block} \end{frame} \begin{frame}[fragile]{Memory Interface} \begin{block}{Memory Macros} \begin{itemize} \item `MEM\_LATENCY\_IN\_CYCLES \begin{itemize} \item Memory latency is fixed to 100ns for every group \item That means this macro will have a different value for each group \item We will test default value, but you should test other latencies \end{itemize} \item `NUM\_MEM\_TAGS \begin{itemize} \item No. of outstanding requests that the memory can handle \item We will be testing your processor with the value set to 15 \end{itemize} \end{itemize} \end{block} \end{frame} \begin{frame}[fragile]{Memory Interface} \begin{block}{Memory Output} \begin{itemize} \item Response (mem2proc\_response) \begin{itemize} \item Slot number in which the memory has accommodated the request \item Can be between 0 and 15 (inclusive) \item `0' is a special case and means that request has been rejected \begin{itemize} \item Issued max amount of outstanding requests \item Invalid address \item No request (command) was made \end{itemize} \end{itemize} \item Tag (mem2proc\_tag)) \begin{itemize} \item Appears on the bus with the data for a load request \item Slot no. in which the request had been accommodated \item Can be between 0 and 15 \item `0' means the data on the bus is invalid (X's) \item Non-zero means the data is valid \end{itemize} \end{itemize} \end{block} \end{frame} \begin{frame}[fragile]{Memory Interface} \begin{block}{Memory Output} \begin{itemize} \item Why do we need a tag anyway? 
\begin{itemize} \item Memory latency is non-zero \begin{itemize} \item Want to be able to pipeline more than one request at a time \item This is called a non-blocking controller \item Need to know when a particular request has been fulfilled \end{itemize} \item Memory arbiter \begin{itemize} \item Up to three things may be contending for the memory \item I-cache, D-cache and Prefetcher \item Need to route requests to the right structure \end{itemize} \end{itemize} \end{itemize} \end{block} \end{frame} \begin{frame}[fragile]{Memory Interface} \begin{block}{Incoming Memory Logic} \begin{minted}[tabsize=3]{verilog} for(int i=1;i<=`NUM_MEM_TAGS;i=i+1) begin if(cycles_left[i]>16'd0) begin cycles_left[i] = cycles_left[i]-16'd1; end else if(acquire_tag && !waiting_for_bus[i] && (cycles_left[i]==0)) begin next_mem2proc_response = i; acquire_tag = 1'b0; cycles_left[i] = `MEM_LATENCY_IN_CYCLES; if(proc2mem_command==`BUS_LOAD) begin waiting_for_bus[i] = 1'b1; loaded_data[i] = unified_memory[proc2mem_addr[63:3]]; end else begin unified_memory[proc2mem_addr[63:3]]=proc2mem_data; end end \end{minted} \end{block} \end{frame} \begin{frame}[fragile]{Memory Interface} \begin{block}{Outgoing Memory Logic} \begin{minted}[tabsize=4]{verilog} if((cycles_left[i]==16'd0) && waiting_for_bus[i] && !bus_filled) begin bus_filled = 1'b1; next_mem2proc_tag = i; next_mem2proc_data = loaded_data[i]; waiting_for_bus[i] = 1'b0; end \end{minted} \end{block} \end{frame} \begin{frame}[fragile]{Memory} \begin{block}{Important Tidbits} \begin{itemize} \item You can change what you do with memory \begin{itemize} \item e.g. pipeline requests, prefetch addresses, novel caching techniques \end{itemize} \item But not how the memory actually works \begin{itemize} \item No modifying the memory module \item No modifying the memory bus to handle more requests or wider requests \end{itemize} \item Instantiated in mem.v in `testbenches' directory \item Remember data bus will be x's except during BUS\_LOAD \end{itemize} \end{block} \end{frame} \section{Union} \begin{frame}[fragile]{More data types???} \begin{itemize} \item So we covered structs before and you should be using them already \item There is a ``dual'' of that: the union \item Just like its origin in C, a SystemVerilog union allows a single piece of storage to be represented different ways using different named member types \item ``In type theory, a struct is the product type of all its members, whereas a union is the sum type'' - my buddy Pranav \end{itemize} \end{frame} \begin{frame}[fragile]{Union example} In a simple example, we have a representation of a 64-bit cache block: \begin{minted}[tabsize=4]{systemverilog} typedef union packed { logic [63:0] double; logic [1:0][31:0] words; logic [3:0][15:0] halves; logic [7:0][7:0] bytes; } CACHE_BLOCK; CACHE_BLOCK block; always_comb begin block.double = 64'hfacefacefaceface; //the entire block block.words[1] = 32'd420; //writing to the upper half block.bytes[2] = 8'd42; //writing only one byte end \end{minted} \end{frame} \begin{frame}[fragile]{Another example} Now let's say you want to break down addressing for different caches: \begin{minted}[tabsize=4]{systemverilog} typedef struct packed { logic [17:0] tag; logic [10:0] block_num; logic [2:0] block_offset; } DMAP_ADDR; //address breakdown for a direct-mapped cache typedef struct packed { logic [19:0] tag; logic [7:0] set_index; logic [2:0] block_offset; } SASS_ADDR; //address breakdown for a set associative cache typedef union packed { DMAP_ADDR d; //for direct mapped SASS_ADDR s;
//for set associative } ADDR; //now we can pass around a common data type \end{minted} \end{frame} \section{I-Cache Controller} \begin{frame}[fragile]{I-Cache Controller Piece by Piece} \begin{block}{I-Cache Controller Interface} \begin{minted}[tabsize=4]{verilog} assign {current_tag, current_index} = proc2Icache_addr[31:3]; output logic [4:0] last_index, output logic [7:0] last_tag, \end{minted} \begin{itemize} \item The instruction cache is direct mapped with 32 lines \item Memory consists of 8192 lines \item The index is therefore 5 bits and the block offset is 3 bits \item Every cycle last\_index/tag $<=$ current\_index/tag \begin{itemize} \item ``current'' signals come from Fetch \item ``last'' registers used as write index/tag for I-Cache \end{itemize} \end{itemize} \end{block} \end{frame} \begin{frame}[fragile]{I-Cache Controller Piece by Piece} \begin{block}{Fetch Memory Load} \begin{minted}[tabsize=4]{verilog} wire changed_addr = (current_index!=last_index) || (current_tag!=last_tag); \end{minted} \begin{itemize} \item Anytime the address changed in fetch, changed\_addr will go high \begin{itemize} \item Cycle 12 here, so memory request issued in cycle 13 \end{itemize} \end{itemize} \begin{minted}[tabsize=1]{verilog} Cycle: IF | ID | EX | MEM | WB 11: 4: or | 4:- | 4:- | 4:- | 4:- 12: 8:- | 4: or | 4:- | 4:- | 4:- 13: 8:- | 8:- | 4: or | 4:- | 4:- BUS_LOAD 14: 8:- | 8:- | 8:- | 4: or | 4:- 15: 8:- | 8:- | 8:- | 8:- | 4: or r3=4096 \end{minted} \end{block} \end{frame} \begin{frame}[fragile]{I-Cache Controller Piece by Piece} \begin{block}{Hit in cache} \begin{minted}[tabsize=4]{verilog} assign Icache_data_out = cachemem_data; assign Icache_valid_out = cachemem_valid; \end{minted} \begin{itemize} \item This is just the data and valid cache line bit from the cache \begin{itemize} \item It is ready every cycle and never needs to wait \end{itemize} \item These outputs go to Fetch \item Data to Fetch does not come from memory directly! \end{itemize} \end{block} \end{frame} \begin{frame}[fragile]{I-Cache Controller Piece by Piece} \begin{block}{Unanswered miss} \begin{minted}[tabsize=4]{verilog} wire unanswered_miss = changed_addr ? !Icache_valid_out : miss_outstanding & (Imem2proc_response==0); \end{minted} \begin{itemize} \item Checked the cache and the value came back invalid \begin{itemize} \item Now I will have to go to memory to get the data \item Or I sent a request to memory and it hasn't been accepted yet \end{itemize} \item miss\_outstanding is just the stored value of unanswered miss \begin{itemize} \item Either I missed in the cache last cycle, or memory didn't accept request \end{itemize} \end{itemize} \begin{minted}[tabsize=1]{verilog} Cycle: IF | ID | EX | MEM | WB 12: 8: - | 4: or | 4: - | 4:- | 4:- 13: 8: - | 8:- | 4: or | 4:- | 4:- BUS_LOAD \end{minted} \end{block} \end{frame} \begin{frame}[fragile]{I-Cache Controller Piece by Piece} \begin{block}{Unanswered miss} \begin{minted}[tabsize=4]{verilog} assign proc2Imem_addr = {proc2Icache_addr[63:3],3'b0}; assign proc2Imem_command = (miss_outstanding && !changed_addr) ? `BUS_LOAD : `BUS_NONE; \end{minted} \begin{itemize} \item proc2Imem\_addr just cuts off the block offset bits \item proc2Imem\_command will issue a Bus Load \begin{itemize} \item If missed in the cache last cycle or a previous request wasn't accepted. \end{itemize} \item If request is accepted, miss\_outstanding will be cleared. 
\begin{itemize} \item Looks at ``!changed\_addr'' because this indicates fetch PC changed \begin{itemize} \item If this happened, need to work on new request instead \end{itemize} \end{itemize} \end{itemize} \end{block} \end{frame} \begin{frame}[fragile]{I-Cache Controller Piece by Piece} \begin{block}{Tracking Tags} \begin{minted}[tabsize=4]{verilog} wire update_mem_tag = changed_addr | miss_outstanding | data_write_enable; \end{minted} \begin{itemize} \item Once you send a `BUS\_LOAD the memory will respond with a ID number on the negative edge \item Need to hold onto this ID for your transaction (current\_mem\_tag) \item When miss\_outstanding is high, grab the ID number \begin{itemize} \item So that you can look for it when the memory broadcasts the value \end{itemize} \item When data\_write\_enable is high, you want to clear the ID number \begin{itemize} \item So you don't grab a new value with the same ID number \end{itemize} \item When changed\_addr is high, clear the ID number \begin{itemize} \item You don't care about the access anymore \item Usually because a branch occurred \end{itemize} \end{itemize} \end{block} \end{frame} \begin{frame}[fragile]{I-Cache Controller Piece by Piece} \begin{block}{Tracking Tags} \begin{minted}[tabsize=1]{verilog} Cycle: IF | ID | EX | MEM | WB 47: 28:bne | 28:- | 28:- | 28:- | 28:- 48: 32: | 28:bne | 28:- | 28:- | 28:- 49: 32:- | 32:- | 28:bne | 28:- | 28:- BUS_LOAD[32] 50: 32:- | 32:- | 32:- | 28:bne | 28:- 51: 8:- | 32:- | 32:- | 32: | 28:bne 52: 8:blt | 8:- | 32:- | 32:- | 32: \end{minted} \begin{itemize} \item Clear ID number when changed\_address is high \item Safe to clear register on that cycle because old request isn't needed \item New memory request doesn't launch until next cycle \begin{itemize} \item changed\_addr would assert on cycle 51, so ID for request gets cleared \end{itemize} \end{itemize} \end{block} \end{frame} \begin{frame}[fragile]{I-Cache Controller Piece by Piece} \begin{block}{Tag Comes Back} \begin{minted}[tabsize=4]{verilog} assign data_write_enable = (current_mem_tag==Imem2proc_tag) && (current_mem_tag!=0); \end{minted} \begin{itemize} \item Write enable to the I-Cache will go high when the tag that is on the memory bus matches \item The write index/tag is the index you sent off to the memory \begin{itemize} \item Stored as current index/tag \end{itemize} \end{itemize} \end{block} \end{frame} \begin{frame}[fragile]{I-Cache Controller Piece by Piece} \begin{block}{Design Choices} \begin{itemize} \item Don't necessarily need to use the changed\_addr line \begin{itemize} \item Could have IF send ``read\_valid'' signal \end{itemize} \item Could use a wr\_idx instead of last\_idx \begin{itemize} \item Gets set when you send off a `BUS\_LOAD \end{itemize} \item Controller waits one cycle after cache miss to send to memory \begin{itemize} \item Can probably be done in one cycle \item But you have to handle the cache lookup in half a cycle \end{itemize} \item Prefetching will drastically increase performance \begin{itemize} \item Make sure you can handle reads and writes in the same cycle \end{itemize} \end{itemize} \end{block} \end{frame} \begin{frame}[fragile]{D-Cache Controller} \begin{block}{D-Cache Controller} \begin{itemize} \item Have the D-Cache take priority over the I-Cache in every case \begin{itemize} \item Stall the Fetch stage like P3 if this happens \item Maybe change priority based on current ROB size \end{itemize} \item Similar to the I-Cache controller except now the controller can store to the cache along 
with memory \begin{itemize} \item Loads are handled the same as the I-Cache \item Stores now store to the Cache and the Memory (unless WB D\$) \begin{itemize} \item If the response is non-zero, assume the store completes \item But will still take up an ID for the entire memory access time \end{itemize} \end{itemize} \end{itemize} \end{block} \end{frame} \begin{frame}[fragile]{D-Cache Controller} \begin{block}{Non-blocking Cache} \begin{itemize} \item Can work on other requests while waiting for memory to supply misses \item Miss Status Handling Registers (MSHRs) help in tracking the misses \begin{itemize} \item Basically a table of tag, address, and data values that are waiting \item A lot in common with a reservation station \end{itemize} \item Need to match tag of incoming data to the proper load in the table \item Increases complexity (but also performance!) \end{itemize} \end{block} \end{frame} \begin{frame}[fragile]{Non-blocking Caches} \begin{block}{Non-blocking Caches} \begin{itemize} \item For the D-Cache: have multiple independent memory operations \begin{itemize} \item Want to be able to service another if one misses in cache \item Will likely evict useful instructions for useless ones \end{itemize} \item Basic idea: Use MSHRs to keep track of requests \item Hard part is the implementation... \begin{itemize} \item Figuring out when a request can go \begin{itemize} \item Depends on forwarding/speculative logic from lecture \end{itemize} \item Updating and ordering requests \item Once you launch a store, it's gone \end{itemize} \end{itemize} \end{block} \end{frame} \section{Memory Bus Example} \begin{frame}[fragile]{Memory Bus Example} \begin{block}{Memory Bus Example} \begin{itemize} \item `NUM\_MEM\_TAGS = 3 \item Only loads are considered \item `MEM\_LATENCY\_IN\_CYCLES = 5 \item Memory clocked on negedge \item MSHRs clocked on posedge \end{itemize} \end{block} \end{frame} \begin{frame}[fragile]{Stores} \begin{block}{{Wait, what about stores?}} \begin{itemize} \item {Stores are registered in the memory in the same way} \item {Need the same number of cycles as loads} \item {If the response is 0, it means the store has not launched} \item {Memory requests are never reordered} \begin{itemize}{Take a minute to convince yourself this is the case...} \end{itemize} \item {Do we need to track stores in MSHRs?} \end{itemize} \end{block} \end{frame} \section{Prefetching} \begin{frame}[fragile]{Prefetching} \begin{block}{Prefetching} \begin{itemize} \item Idea: on a miss, grab more than just the current block \begin{itemize} \item Maybe make some sort of state machine \end{itemize} \item The farther you prefetch the more likely you will interfere with the D-Cache \item More complicated issues the more you prefetch... 
\begin{itemize} \item Suppose you prefetch two lines ahead of a taken branch \begin{itemize} \item Best case: The two lines you prefetched are no longer needed \item Worst case: you evict instructions you need from your I-Cache \end{itemize} \item Watch out for the interleaving of prefetched data and D-Cache data \begin{itemize} \item Don't want to slow down the D-Cache \end{itemize} \item Suppose your access misses but the data that you would prefetch hits in the cache \end{itemize} \end{itemize} \end{block} \end{frame} \begin{frame}[fragile]{Prefetching} \begin{block}{Instruction Prefetching} \begin{itemize} \item Probably best to stick with prefetching for just I-Cache, not D-Cache \item On a miss, grab more than just the current block \begin{itemize} \item Hope that the instruction/data will be requested in the near future... \item In which case it will already be in the cache (unless evicted) \end{itemize} \item The farther you prefetch the more likely you will interfere with the D-Cache \item More prefetching, more problems... \begin{itemize} \item Need to track multiple outstanding requests to memory \item Don't want to issue requests for lines that are already valid in the cache \item What to do when Fetch requests something else in the middle of waiting for the previous miss to come back? \item May run out of memory bandwidth \item May not get access to memory (if D-Cache requesting) \end{itemize} \end{itemize} \end{block} \end{frame} \begin{frame}[fragile]{Prefetching} \begin{block}{Main algorithm (after miss observed)} \begin{itemize} \item Issue request for missed line, store address and memory response, start prefetch FSM \item For as many cycles as we want to prefetch... \begin{itemize} \item Increment prefetch address to next line \item See if that line is valid in the cache \item If not, store address somewhere to be requested later \item When should you stop? \begin{itemize} \item If you hit a valid line? \item Fetch requests something else? (branch mispredicted) \item D-Cache needs access to bus? \end{itemize} \end{itemize} \item Recommend having a second read port on I-Cache for prefetcher to use \end{itemize} \end{block} \end{frame} \begin{frame}[fragile]{Prefetching} \begin{block}{Tracking Requests} \begin{itemize} \item Keep buffer of requests in cache controller (MSHRs) \begin{itemize} \item Allocate entry on cache miss and we wish to prefetch \begin{itemize} \item Store address (so we know where to write into cache) \item Mark entry as wanting to send request \end{itemize} \item Look for entries wanting to send request \begin{itemize} \item Send request to memory with entry's stored address \item Store mem2proc\_response back in entry \item Mark entry as having sent a request \end{itemize} \item When data comes back from memory \begin{itemize} \item Compare mem2proc\_tag with stored responses from all valid buffer entries \item Get \{tag,index\} from stored address for writing into the cache \item De-allocate entry \end{itemize} \end{itemize} \end{itemize} \end{block} \end{frame} \begin{frame}[fragile]{Prefetching} \begin{block}{Prefetching Ideas} \begin{itemize} \item Conservative strategy: Grab next block on miss \begin{itemize} \item Helps quite a bit: half of all instructions are prefetched \end{itemize} \item Greedy strategy: march through memory \begin{itemize} \item Will likely evict useful instructions for useless ones \end{itemize} \item Move prefetch pointer on branch \begin{itemize} \item Predict taken? Or not taken? Or both? 
\item Branch predictor information could be helpful to decide \end{itemize} \end{itemize} \end{block} \end{frame} \end{document}
{ "alphanum_fraction": 0.6936199875, "avg_line_length": 32.0670588235, "ext": "tex", "hexsha": "e53c8672706ec047d4da0a1f6e249c1852111215", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-03-19T14:11:52.000Z", "max_forks_repo_forks_event_min_datetime": "2021-03-19T14:11:52.000Z", "max_forks_repo_head_hexsha": "33b44f4b88cdd43930d4ccbdbbb82af914f43c22", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "jieltan/OpenCompArchCourse", "max_forks_repo_path": "labs/lab7/eecs470lab7slides.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "33b44f4b88cdd43930d4ccbdbbb82af914f43c22", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "jieltan/OpenCompArchCourse", "max_issues_repo_path": "labs/lab7/eecs470lab7slides.tex", "max_line_length": 159, "max_stars_count": 3, "max_stars_repo_head_hexsha": "33b44f4b88cdd43930d4ccbdbbb82af914f43c22", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "jieltan/OpenCompArchCourse", "max_stars_repo_path": "labs/lab7/eecs470lab7slides.tex", "max_stars_repo_stars_event_max_datetime": "2022-01-15T14:07:10.000Z", "max_stars_repo_stars_event_min_datetime": "2021-03-19T17:25:16.000Z", "num_tokens": 8547, "size": 27257 }
%!TEX root = ../thesis.tex % ******************************* Thesis Appendix C **************************** \chapter{TLB Data Configuration Format} \label{TLBDataFormat} \section{TLB Configuration Variable Layout} % Table generated by Excel2LaTeX from sheet 'Feuil1' \begin{table}[htbp] \centering \caption{Full Variable Layout TLB Configuration} \scalebox{0.7}{ \begin{tabular}{rrlrrr} \multicolumn{1}{l}{Start Index} & \multicolumn{1}{l}{Next Index} & Name & \multicolumn{1}{l}{Bit Size} & \multicolumn{1}{l}{Min} & \multicolumn{1}{l}{Max} \\ \toprule 0 & 1 & SamplingPhase & 1 & 0 & 1 \\ 1 & 2 & SamplingPhase & 1 & 0 & 1 \\ 2 & 3 & SamplingPhase & 1 & 0 & 1 \\ 3 & 4 & SamplingPhase & 1 & 0 & 1 \\ 4 & 5 & SamplingPhase & 1 & 0 & 1 \\ 5 & 6 & SamplingPhase & 1 & 0 & 1 \\ 6 & 7 & SamplingPhase & 1 & 0 & 1 \\ 7 & 8 & SamplingPhase & 1 & 0 & 1 \\ 8 & 10 & InputDelay & 2 & 0 & 3 \\ 10 & 12 & InputDelay & 2 & 0 & 3 \\ 12 & 14 & InputDelay & 2 & 0 & 3 \\ 14 & 16 & InputDelay & 2 & 0 & 3 \\ 16 & 18 & InputDelay & 2 & 0 & 3 \\ 18 & 20 & InputDelay & 2 & 0 & 3 \\ 20 & 22 & InputDelay & 2 & 0 & 3 \\ 22 & 24 & InputDelay & 2 & 0 & 3 \\ 24 & 27 & RandomTriggerRate & 3 & 0 & 7 \\ 27 & 35 & Prescale & 8 & 0 & 255 \\ 35 & 43 & Prescale & 8 & 0 & 255 \\ 43 & 51 & Prescale & 8 & 0 & 255 \\ 51 & 59 & Prescale & 8 & 0 & 255 \\ 59 & 67 & Prescale & 8 & 0 & 255 \\ 67 & 75 & Prescale & 8 & 0 & 255 \\ 75 & 82 & TrackerDelay & 7 & 0 & 127 \\ 82 & 89 & DigitizerDelay & 7 & 0 & 127 \\ 89 & 90 & LHC\_Clock & 1 & 0 & 1 \\ 90 & 102 & OrbitDelay & 12 & 0 & 4095 \\ 102 & 114 & Deadtime & 12 & 0 & 4095 \\ 114 & 134 & MonitoringRate & 20 & 0 & 1048575 \\ 134 & 166 & OutputDestination & 32 & 0 & 4294967295 \\ 166 & 167 & Enable & 1 & 0 & 1 \\ 167 & 168 & Enable & 1 & 0 & 1 \\ 168 & 169 & Enable & 1 & 0 & 1 \\ 169 & 170 & Enable & 1 & 0 & 1 \\ 170 & 171 & Enable & 1 & 0 & 1 \\ 171 & 172 & Enable & 1 & 0 & 1 \\ 172 & 173 & Enable & 1 & 0 & 1 \\ 173 & 174 & Enable & 1 & 0 & 1 \\ \end{tabular}% } \label{tab:VariableLayoutTLBConfigurationFULL}% \end{table}%
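To make the layout concrete, the following sketch (not part of the thesis
software; field offsets and sizes are taken from
Table~\ref{tab:VariableLayoutTLBConfigurationFULL}, and the bit ordering
within a field is an assumption made only for illustration) shows how a few
configuration values could be packed into the 174-bit configuration string
in Python:

\begin{verbatim}
# Hypothetical illustration of the bit layout documented in the table above.
FIELDS = {                       # name: (start index, bit size)
    "RandomTriggerRate": (24, 3),
    "TrackerDelay":      (75, 7),
    "OrbitDelay":        (90, 12),
}

def pack(values, total_bits=174):
    bits = [0] * total_bits
    for name, value in values.items():
        start, size = FIELDS[name]
        assert 0 <= value < 2 ** size        # respect the Min/Max columns
        for i in range(size):                # least-significant bit first
            bits[start + i] = (value >> i) & 1
    return "".join(str(b) for b in bits)

config_bits = pack({"RandomTriggerRate": 5, "OrbitDelay": 100})
\end{verbatim}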
{ "alphanum_fraction": 0.4051598837, "avg_line_length": 49.1428571429, "ext": "tex", "hexsha": "5cad09bbc5f2db579dcfc1a644e61997fc5befda", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "b4f34f6efb18d27e880866f49162b02669e07cb1", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "eliottlerouge/Master-Thesis-on-FASER-Eliott-JOHNSON", "max_forks_repo_path": "Appendix3/appendix3.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "b4f34f6efb18d27e880866f49162b02669e07cb1", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "eliottlerouge/Master-Thesis-on-FASER-Eliott-JOHNSON", "max_issues_repo_path": "Appendix3/appendix3.tex", "max_line_length": 162, "max_stars_count": null, "max_stars_repo_head_hexsha": "b4f34f6efb18d27e880866f49162b02669e07cb1", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "eliottlerouge/Master-Thesis-on-FASER-Eliott-JOHNSON", "max_stars_repo_path": "Appendix3/appendix3.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1081, "size": 2752 }
\section{Tech Preview}

This guide highlights the initial availability of \OHPC{} packages targeted
for use on 64-bit ARM-based architectures. This collection is initially being
provided as a {\bf Tech Preview} release, as there are some known issues
around provisioning and a subset of the development packages. A running log
of errata and fixes can be found at the
\href{https://github.com/openhpc/ohpc/wiki/ARM-Tech-Preview}{\color{blue}{ARM Tech Preview Wiki}}.

The guide follows the general installation steps laid out in other companion
\OHPC{} recipes. However, the provisioning steps as outlined with Warewulf
(most of the steps in \S~\ref{sec:add_provisioning} through
\ref{sec:boot_computes}) are not directly usable without additional
modification to the PXE boot process. Consequently, users interested in
leveraging packages from this Tech Preview are encouraged to enable the repo
and install the desired development components (\S~\ref{sec:install_dev}) on
top of systems where the underlying base OS is pre-installed. In addition, if
multiple nodes are available, the \rms{} resource manager can be used to
schedule resources. Future \OHPC{} releases will expand on this Tech Preview
to include validated recipes for a bare-metal cluster install. \\

\noindent Known Package Issues:
\begin{itemize*}
\item GSL: a small subset of tests performed with the GSL library failed
precision-related tests. This is currently attributed to the fact that the
tests included in GSL are tuned for x86, which uses 80-bit extended
precision.
\item PAPI: hardware counters may not be available depending on the
underlying ARM platform.
\item MPI: the hardware used for validating this Tech Preview release
contained only Ethernet. The available MPI stacks reflect this test
environment.
%\item mpiP: appears to have trouble collecting certain information in certain
%scenarios causing it to fail integration tests
\item Hypre and SuperLU-dist: the libraries build, but unresolved symbols
remain when linking test applications.
\item Nagios and Ganglia: do not work on SLES-12-SP1 due to missing PHP5
dependencies.
\item Lustre: since various ARM platforms require different kernels than the
standard ones provided by the SLES-12-SP1 and CentOS-7.2 distributions,
building a Lustre client that would work for these specific platform
configurations was beyond the scope of this release.
\item Warewulf: the ARM Standard Base Boot Requirements and Standard Base
System Architecture require specific UEFI support during the boot process,
which does not seem to be compatible with the way Warewulf currently
auto-provisions worker nodes. There is a work-around, but it requires some
manual intervention during installation and deployment of the nodes.
\end{itemize*}
{ "alphanum_fraction": 0.809403255, "avg_line_length": 56.4285714286, "ext": "tex", "hexsha": "986bd06ee8323dddcea8008e9d15b4d413ff45f3", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "97f704384d8cab1a59a86576972adef9d1b4d05b", "max_forks_repo_licenses": [ "Intel" ], "max_forks_repo_name": "ayushmathur/ostack-hpc", "max_forks_repo_path": "docs/recipe/install/common/tech_preview.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "97f704384d8cab1a59a86576972adef9d1b4d05b", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Intel" ], "max_issues_repo_name": "ayushmathur/ostack-hpc", "max_issues_repo_path": "docs/recipe/install/common/tech_preview.tex", "max_line_length": 108, "max_stars_count": null, "max_stars_repo_head_hexsha": "97f704384d8cab1a59a86576972adef9d1b4d05b", "max_stars_repo_licenses": [ "Intel" ], "max_stars_repo_name": "ayushmathur/ostack-hpc", "max_stars_repo_path": "docs/recipe/install/common/tech_preview.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 612, "size": 2765 }
\subsection{Adjustment} \index{C}{adjustment!computation} There are two basic approaches to adjusting for covariates. Conceptually, the simplest one is to hold the covariates constant at some level when collecting data or by extracting a subset of data which holds those covariates constant. The other approach is to include the covariates in your models. For example, suppose you want to study the differences in the wages of male and females. The very simple model \model{\VN{wage}}{\VN{sex}} might give some insight, but it attributes to \VN{sex} effects that might actually be due to level of education, age, or the sector of the economy in which the person works. Here's the result from the simple model:\datasetCPS \begin{Schunk} \begin{Sinput} > cps = fetchData("cps.csv") > mod0 = lm( wage ~ sex, data=cps) > summary(mod0) \end{Sinput} \begin{Soutput} ... Estimate Std. Error t value Pr(>|t|) (Intercept) 7.879 0.322 24.50 < 2e-16 *** sexM 2.116 0.437 4.84 1.7e-06 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 Residual standard error: 5.03 on 532 degrees of freedom Multiple R-squared: 0.0422, Adjusted R-squared: 0.0404 F-statistic: 23.4 on 1 and 532 DF, p-value: 1.7e-06 \end{Soutput} \end{Schunk} The coefficients indicate that a typical male makes \$2.12 more per hour than a typical female. (Notice that $R^2 = 0.0422$ is very small: \VN{sex} explains hardly any of the person-to-person variability in wage.) By including the variables \VN{age}, \VN{educ}, and \VN{sector} in the model, you can adjust for these variables: \begin{Schunk} \begin{Sinput} > mod1 = lm( wage ~ age + sex + educ + sector, data=cps) > summary(mod1) \end{Sinput} \begin{Soutput} ... Estimate Std. Error t value Pr(>|t|) (Intercept) -4.6941 1.5378 -3.05 0.00238 ** age 0.1022 0.0166 6.17 1.4e-09 *** sexM 1.9417 0.4228 4.59 5.5e-06 *** educ 0.6156 0.0944 6.52 1.6e-10 *** sectorconst 1.4355 1.1312 1.27 0.20500 sectormanag 3.2711 0.7668 4.27 2.4e-05 *** sectormanuf 0.8063 0.7311 1.10 0.27064 sectorother 0.7584 0.7592 1.00 0.31829 sectorprof 2.2478 0.6698 3.36 0.00085 *** sectorsales -0.7671 0.8420 -0.91 0.36273 sectorservice -0.5687 0.6660 -0.85 0.39356 --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 Residual standard error: 4.33 on 523 degrees of freedom Multiple R-squared: 0.302, Adjusted R-squared: 0.289 F-statistic: 22.6 on 10 and 523 DF, p-value: <2e-16 \end{Soutput} \end{Schunk} The adjusted difference between the sexes is \$1.94 per hour. (The $R^2=0.30$ from this model is considerably larger than for \texttt{mod0}, but still a lot of the person-to-person variation in wages has not be captured.) It would be wrong to claim that simply including a covariate in a model guarantees that an appropriate adjustment has been made. The effectiveness of the adjustment depends on whether the model design is appropriate, for instance whether appropriate interaction terms have been included. However, it's certainly the case that if you {\bf don't} include the covariate in the model, you have {\bf not} adjusted for it. The other approach is to subsample the data so that the levels of the covariates are approximately constant. 
For example, here is a subset that considers workers between the ages of 30
and 35 with 10 to 12 years of education who are working in the sales sector
of the economy:
\begin{Schunk}
\begin{Sinput}
> small = subset(cps, age <=35 & age >= 30 & educ>=10 & educ <=12 & sector=="sales" )
\end{Sinput}
\end{Schunk}
The choice of these particular levels of \VN{age}, \VN{educ}, and
\VN{sector} is arbitrary, but you need to choose some level if you want to
hold the covariates approximately constant.

The subset of the data can be used to fit a simple model:
\begin{Schunk}
\begin{Sinput}
> mod4 = lm( wage ~ sex, data=small)
> summary(mod4)
\end{Sinput}
\begin{Soutput}
...
            Estimate Std. Error t value Pr(>|t|)  
(Intercept)    4.500      0.500     9.0     0.07 .
sexM           4.500      0.866     5.2     0.12  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.707 on 1 degrees of freedom
Multiple R-squared: 0.964,  Adjusted R-squared: 0.929 
F-statistic: 27 on 1 and 1 DF,  p-value: 0.121 
\end{Soutput}
\end{Schunk}

At first glance, there might seem to be nothing wrong with this approach
and, indeed, for very large data sets it can be effective. In this case,
however, there are only 3 cases that satisfy the various criteria: two women
and one man.
\begin{Schunk}
\begin{Sinput}
> table( small$sex )
\end{Sinput}
\begin{Soutput}
F M 
2 1 
\end{Soutput}
\end{Schunk}
So, the \$4.50 difference in wages between the sexes depends entirely on the
data from a single male! (Chapter \ref{chap:confidence} describes how to
assess the precision of model coefficients. This one works out to be $4.50
\pm 11.00$ --- not at all precise.)
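As a quick check on that imprecision (a sketch that anticipates
Chapter \ref{chap:confidence}; the exact output is not reproduced here), R's
\texttt{confint} function can be applied directly to the fitted model:
\begin{Schunk}
\begin{Sinput}
> confint(mod4)
\end{Sinput}
\end{Schunk}
With only one residual degree of freedom, the 95\% interval for \texttt{sexM}
should span roughly $-6.5$ to $15.5$, matching the $4.50 \pm 11.00$ figure
quoted above.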
{ "alphanum_fraction": 0.6748359707, "avg_line_length": 37.2805755396, "ext": "tex", "hexsha": "ea2e8be420ba14e389a1bad7848fc81d2d8ac78f", "lang": "TeX", "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2021-09-08T12:42:15.000Z", "max_forks_repo_forks_event_min_datetime": "2017-02-14T05:22:29.000Z", "max_forks_repo_head_hexsha": "56fef8d4368e7afa7ccce006d8f4acc6cf6c1fd1", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "BriannaBarry/SM3", "max_forks_repo_path": "ComputationalTechnique-Orig/TotalPartial/computer-total-partial.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "56fef8d4368e7afa7ccce006d8f4acc6cf6c1fd1", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "BriannaBarry/SM3", "max_issues_repo_path": "ComputationalTechnique-Orig/TotalPartial/computer-total-partial.tex", "max_line_length": 74, "max_stars_count": 1, "max_stars_repo_head_hexsha": "56fef8d4368e7afa7ccce006d8f4acc6cf6c1fd1", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "dtkaplan/SM3", "max_stars_repo_path": "ComputationalTechnique-Orig/TotalPartial/computer-total-partial.tex", "max_stars_repo_stars_event_max_datetime": "2021-04-01T01:28:07.000Z", "max_stars_repo_stars_event_min_datetime": "2021-04-01T01:28:07.000Z", "num_tokens": 1712, "size": 5182 }
\documentclass[12pt, letterpaper]{report} \usepackage[margin=1in]{geometry} \usepackage[utf8]{inputenc} \usepackage{graphicx} \usepackage{float} \usepackage{subfig} \graphicspath{ {./img/} } \setlength\parindent{0pt} \renewcommand\thesection{\Roman{section}.} \renewcommand{\thesubsection}{\alph{subsection}.} \title{CS1675 - Assignment 1} \author{Zachary M. Mattis} \begin{document} \maketitle \section{Problem 2 - Data Analysis} % A \subsection{Max / Min} \begin{center} \begin{tabular}{ |l|l|l|l|l|l|l|l|l| } \hline Max & 17 & 199 & 122 & 99 & 846 & 67.1 & 2.42 & 81 \\ \hline Min & 0 & 0 & 0 & 0 & 0 & 0 & 0.078 & 21 \\ \hline \end{tabular} \end{center} % B \subsection{Mean / Variance} \begin{center} \begin{tabular}{ |l|l|l|l|l|l|l|l|l| } \hline $\mu$ & 3.8451 & 120.8945 & 69.1055 & 20.5365 & 79.7995 & 31.9926 & 0.4719 & 33.2409 \\ \hline $\sigma$ & 11 & 1021 & 374 & 254 & 13264 & 62 & 0 & 138 \\ \hline \end{tabular} \end{center} % C \subsection{Subset} Class 0 \begin{center} \begin{tabular}{ |l|l|l|l|l|l|l|l|l| } \hline $\mu$ & 3.298 & 109.98 & 68.184 & 19.664 & 68.792 & 30.3042 & 0.4207 & 31.19 \\ \hline $\sigma$ & 3.0172 & 26.1412 & 18.0631 & 14.8889 & 98.8653 & 7.6899 & 0.2991 & 11.6677 \\ \hline \end{tabular} \end{center} Class 1 \begin{center} \begin{tabular}{ |l|l|l|l|l|l|l|l|l| } \hline $\mu$ & 4.8657 & 141.2575 & 70.8246 & 22.1642 & 100.3358 & 35.1425 & 0.5505 & 37.0672 \\ \hline $\sigma$ & 3.7412 & 31.9396 & 21.4918 & 17.6797 & 138.6891 & 7.263 & 0.3724 & 10.9683 \\ \hline \end{tabular} \end{center} Based on the data from the tables above, attribute 2 appears to show the greatest difference in mean among the differing classes. Attribute 2 corresponds to the plasma glucose concentration after 2 hours in an oral glucose tolerance test. This analysis makes sense considering that the two classes are divided by a diabetes diagnoses, which would have a strong correlation with an attribute associated to glucose levels. % D \subsection{Histogram} \begin{verbatim} function histogram_analysis( attribute_vector ) hist(attribute_vector, 20); end \end{verbatim} % E \subsection{Normal Distribution} The two attribute values that most closely match a normal distribution are Diastolic Blood Pressure (attr. 3) and Body Mass Index (attr. 6), as shown in the histograms below. \begin{figure}[H] \captionsetup[subfigure]{labelformat=empty} \centering \subfloat[Figure 1]{{\includegraphics[width=18em]{p2e3.png} }} \qquad \subfloat[Figure 2]{{\includegraphics[width=18em]{p2e6.png} }} \label{fig:example} \end{figure} % F \subsection{Subset Analysis} Attribute 2 of the dataset, plasma glucose concentration after 2 hours in an oral glucose tolerance test, appears to be the most helpful in discriminating between the two sets. Based on the two histograms below, figure 4 represents a normal distribution for the negative diabetes test. However, as seen in figure 3, the distribution is heavily skewed to the right, indicating a correlation between glucose levels and positive test patients. \begin{figure}[H] \captionsetup[subfigure]{labelformat=empty} \centering \subfloat[Figure 3]{{\includegraphics[width=18em]{p2f2d1.png} }} \qquad \subfloat[Figure 4]{{\includegraphics[width=18em]{p2f2d0.png} }} \label{fig:example} \end{figure} % G \subsection{Scatter Plots} \begin{verbatim} function scatter_plot( vector ) scatter( vector(:,1), vector(:,2) ); end \end{verbatim} Given two random variables that are independent, I would predict to see no correlation between the plotted values, which can be seen in figure 6. 
However, when these random variables are not quite independent, a correlation among the data points can be observed, as in figure 5. This is quite predictable in this example, as it is expected that those with a higher BMI would also tend toward a higher skin fold thickness due to the overlapping nature of the variables. \begin{figure}[H] \captionsetup[subfigure]{labelformat=empty} \centering \subfloat[Figure 5]{{\includegraphics[width=18em]{p2g4v6.png} }} \qquad \subfloat[Figure 6]{{\includegraphics[width=18em]{p2g3v5.png} }} \label{fig:example} \end{figure} \section{Problem 3 - Data Preprocessing} % A \subsection{One's Hot Encoding} One's hot encoding is a common scheme for encoding information utilizing standard binary values in a vector whereby the index of the '1' indicates the value of the scalar. Each subsequent scalar can be converted into its corresponding one's hot vector given the following encoding: \begin{verbatim} {brown, blue, white, red, yellow, orange, green, black} \end{verbatim} \begin{center} \begin{tabular}{ |l|l| } \hline red & [0, 0, 0, 1, 0, 0, 0, 0] \\ \hline black & [0, 0, 0, 0, 0, 0, 0, 1] \\ \hline yellow & [0, 0, 0, 0, 1, 0, 0, 0] \\ \hline red & [0, 0, 0, 1, 0, 0, 0, 0] \\ \hline green & [0, 0, 0, 0, 0, 0, 1, 0] \\ \hline blue & [0, 1, 0, 0, 0, 0, 0, 0] \\ \hline blue & [0, 1, 0, 0, 0, 0, 0, 0] \\ \hline \end{tabular} \end{center} % B \subsection{Normalization} \begin{verbatim} function [ normalized, mu, sigma ] = normalize( attribute ) mu = mean(attribute); sigma = std(attribute); normalized = (attribute - mu)/sigma; end \end{verbatim} First five values of Attribute 3 (Diastolic Blood Pressure) after being normalized. \begin{center} \begin{tabular}{ |l|l|l|l|l|l| } \hline 0.1495 & -0.1604 & -0.2638 & -0.1604 & -1.5037 \\ \hline \end{tabular} \end{center} % C \subsection{Discretization} \begin{verbatim} function [ discrete ] = discretize_attribute( attribute, k ) min_val = min(attribute); max_val = max(attribute); bin_div = (max_val - min_val)/k; discrete = fix((attribute-min_val)/bin_div); end \end{verbatim} First five values of Attribute 3 (Diastolic Blood Pressure) after being discretized. 
\begin{center}
\begin{tabular}{ |l|l|l|l|l| }
\hline
5 & 5 & 5 & 5 & 3 \\
\hline
\end{tabular}
\end{center}

\section{Problem 4 - Data Training}

\begin{verbatim}
function [ training_set, testing_set ] = divideset( dataset, p_train )
    training_count = round(p_train * length(dataset));
    indices = randperm(length(dataset), training_count);

    t = zeros([length(dataset) 1]);
    t(indices) = 1;

    training_set = dataset(t(:) == 1,:);
    testing_set = dataset(t(:) == 0,:);
end
\end{verbatim}

\section{Problem 5 - Matrix Operations}

\subsection{$A^{T}$}
\begin{center}
\begin{tabular}{ |l|l| }
\hline
1 & 3 \\ \hline
2 & 4 \\ \hline
5 & 6 \\ \hline
\end{tabular}
\end{center}

\subsection{$B^{-1}$}
\begin{center}
\begin{tabular}{ |l|l|l| }
\hline
1 & -5.5 & 1.25 \\ \hline
0 & -0.5 & 0.25 \\ \hline
-0.667 & 4.333 & -1 \\ \hline
\end{tabular}
\end{center}

\subsection{$B + C$}
\begin{center}
\begin{tabular}{ |l|l|l| }
\hline
15 & 7 & 14 \\ \hline
3 & -1 & 7 \\ \hline
3 & 6 & 10 \\ \hline
\end{tabular}
\end{center}

\subsection{$B - C$}
\begin{center}
\begin{tabular}{ |l|l|l| }
\hline
-1 & -5 & 4 \\ \hline
1 & 5 & -1 \\ \hline
5 & 10 & 2 \\ \hline
\end{tabular}
\end{center}

\subsection{$A * B$}
\begin{center}
\begin{tabular}{ |l|l|l| }
\hline
31 & 45 & 45 \\ \hline
53 & 59 & 75 \\ \hline
\end{tabular}
\end{center}

\subsection{$B * C$}
\begin{center}
\begin{tabular}{ |l|l|l| }
\hline
48 & 21 & 75 \\ \hline
15 & 0 & 30 \\ \hline
34 & -12 & 76 \\ \hline
\end{tabular}
\end{center}

\subsection{$B * A$}
Cannot compute this matrix operation because the inner dimensions do not match.

\end{document}
{ "alphanum_fraction": 0.6639569949, "avg_line_length": 24.9248366013, "ext": "tex", "hexsha": "a0932f2fb4f2c57a27ee48129a155a9be5067c2f", "lang": "TeX", "max_forks_count": 4, "max_forks_repo_forks_event_max_datetime": "2021-03-04T07:41:07.000Z", "max_forks_repo_forks_event_min_datetime": "2018-10-14T03:28:19.000Z", "max_forks_repo_head_hexsha": "29ba0f4686f34d633b474bb792cf0e6cee8b0f1c", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "zmattis/University_of_Pittsburgh", "max_forks_repo_path": "CS-1675/Homework1/homework1_analysis.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "29ba0f4686f34d633b474bb792cf0e6cee8b0f1c", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "zmattis/University_of_Pittsburgh", "max_issues_repo_path": "CS-1675/Homework1/homework1_analysis.tex", "max_line_length": 468, "max_stars_count": 6, "max_stars_repo_head_hexsha": "29ba0f4686f34d633b474bb792cf0e6cee8b0f1c", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "zmattis/University_of_Pittsburgh", "max_stars_repo_path": "CS-1675/Homework1/homework1_analysis.tex", "max_stars_repo_stars_event_max_datetime": "2021-07-05T09:25:12.000Z", "max_stars_repo_stars_event_min_datetime": "2017-07-21T17:56:15.000Z", "num_tokens": 2865, "size": 7627 }
\clearpage\chapter{The Type Inferencing Model} Three sections of this dissertation are devoted to type inferencing: two chapters and an appendix. This chapter develops a theoretical model of type inferencing for Icon. For simplicity, it ignores some features of the language. This chapter presents intuitive arguments for the correctness of the formal model. Chapter 19 describes the actual implementation of type inferencing in the Icon compiler. The implementation handles the full Icon language and, for pragmatic reasons, differs from the theoretical model in some details. This chapter starts with the motivation for performing type inferencing. It then describes the concept of \textit{abstract interpretation}. This concept is used as a tool in this chapter to develop a type inferencing system from Icon's semantics. This chapter gives an intuitive presentation of this development process before presenting the formal models of abstract semantics for Icon. The most abstract of the formal models is the type inferencing system. \section{Motivation} Variables in the Icon programming language are untyped. That is, a variable may take on values of different types as the execution of a program proceeds. In the following example, \texttt{x} contains a string after the read (if the read succeeds), but it is then assigned an integer or real, provided the string can be converted to a numeric type. \goodbreak \begin{iconcode} x := read()\\ if numeric(x) then x +:= 4\\ \end{iconcode} In general, it is impossible to know the type of an operator's operands at translation time, so some type checking must be done at run time. This type checking may result in type conversions, run-time errors, or the selection among polymorphous operations (for example, the selection of integer versus real addition). In the Icon interpreter system, all operators check all of their operands at run time. This incurs significant overhead. Much of this run-time type checking is unnecessary. An examination of typical Icon programs reveals that the types of most variables remain consistent throughout execution (except for the initial null value) and that these types can often be determined by inspection. Consider \goodbreak \begin{iconcode} if x := read() then\\ \>y := x || ";"\\ \end{iconcode} Clearly both operands of {\textbar}{\textbar} are strings so no checking or conversion is needed. The goal of a type inferencing system is to determine what types variables may take on during the execution of a program. It associates with each variable usage a set of the possible types of values that variable might have when execution reaches the usage. This set may be a conservative estimate (overestimate) of the actual set of possible types that a variable may take on because the actual set may not be computable, or because an analysis to compute the actual set may be too expensive. However, a good type inferencing system operating on realistic programs can determine the exact set of types for most operands and the majority of these sets in fact contain single types, which is the information needed to generate code without type checking. The Icon compiler has an effective type inferencing system based on data flow analysis techniques. \section{Abstract Interpretation } Data flow analysis can be viewed as a form of abstract interpretation [.absintrp.]. This can be particularly useful for understanding type inferencing. 
A ``concrete'' interpreter for a language implements the standard (operational) semantics of the language, producing a sequence of states, where a state consists of an execution point, bindings of program variables to values, and so forth. An abstract interpreter does not implement the semantics, but rather computes information related to the semantics. For example, an abstract interpretation may compute the sign of an arithmetic expression rather than its value. Often it computes a ``conservative'' estimate for the property of interest rather than computing exact information. Data flow analysis is simply a form of abstract interpretation that is guaranteed to terminate. This chapter presents a sequence of approximations to Icon semantics, culminating in one suitable for type inferencing. Consider a simplified operational semantics for Icon, consisting only of program points (with the current execution point maintained in a program counter) and variable bindings (maintained in an environment). As an example of these semantics, consider the following program. Four program points are annotated with numbers using comments (there are numerous intermediate points not annotated). \goodbreak \begin{iconcode} procedure main()\\ local s, n\\ \\ \>\# 1:\\ \>s := read()\\ \>\# 2:\\ \>every n := 1 to 2 do \{\\ \>\>\# 3:\\ \>\>write(s[n])\\ \>\>\}\\ \>\# 4:\\ end\\ \end{iconcode} If the program is executed with an input of \texttt{abc}, the following states are included in the execution sequence (only the annotated points are listed). States are expressed in the form \textit{program point}: \textit{environment}. \goodbreak \begin{specialcode}{} \>\>1: [\texttt{s} = null, \texttt{n} = null]\\ \>\>2: [\texttt{s} = "\texttt{abc}", \texttt{n} = null]\\ \>\>3: [\texttt{s} = "\texttt{abc}", \texttt{n} = 1]\\ \>\>3: [\texttt{s} = "\texttt{abc}", \texttt{n} = 2]\\ \>\>4: [\texttt{s} = "\texttt{abc}", \texttt{n} = 2]\\ \end{specialcode} It is customary to use the \textit{collecting semantics} of a language as the first abstraction (approximation) to the standard semantics of the language. The collecting semantics of a program is defined in Cousot and Cousot [.absintrp.] (they use the term \textit{static semantics}) to be an association between program points and the sets of environments that can occur at those points during all possible executions of the program. Once again, consider the previous example. In general, the input to the program is unknown, so the read function is assumed to be capable of producing any string. Representing this general case, the set of environments (once again showing only variable bindings) that can occur at point 3 is \goodbreak \begin{specialcode}{} \>\>[\texttt{s} = "", \texttt{n} = 1],\\ \>\>[\texttt{s} = "", \texttt{n} = 2],\\ \>\>[\texttt{s} = "a", \texttt{n} = 1],\\ \>\>[\texttt{s} = "a", \texttt{n} = 2],\\ \>\>\>\ \ldots\\ \>\>[\texttt{s} = "\texttt{abcd}", \texttt{n} = 1],\\ \>\>[\texttt{s} = "\texttt{abcd}", \texttt{n} = 2],\\ \>\>\> \ldots\\ \end{specialcode} A type inferencing abstraction further approximates this information, producing an association between each variable and a type at each program point. The actual type system chosen for this abstraction must be based on the language and the use to which the information is put. The type system used here is based on Icon's run-time type system. For structure types, the system used retains more information than a simple use of Icon's type system would retain; this is explained in detail later. 
For atomic types, Icon's type system is used as is. For point 3 in the preceding example the associations between variables and types are \begin{specialcode}{} \>\>[\texttt{s} = string, \texttt{n} = integer]\\ \end{specialcode} The type inferencing system presented in this chapter is best understood as the culmination of a sequence of abstractions to the semantics of Icon, where each abstraction discards certain information. For example, the collecting semantics discards sequencing information among states; in the preceding program, collecting semantics determine that, at point 3, states may occur with \texttt{n} equal to 1 and with \texttt{n} equal to 2, but does not determine the order in which they must occur. This sequencing information is discarded because desired type information is a static property of the program. The first abstraction beyond the collecting semantics discards dynamic control flow information for goal directed evaluation. The second abstraction collects, for each variable, the value associated with the variable in each environment. It discards information such as, ``\texttt{x} has the value 3 when \texttt{y} has the value 7'', replacing it with ``\texttt{x} may have the value 3 sometime and \texttt{y} may have the value 7 sometime.''. It effectively decouples associations between variables. This second abstraction associates a set of values with a variable, but this set may be any of an infinite number of sets and it may contain an infinite number of values. In general, this precludes either a finite computation of the sets or a finite representation of them. The third abstraction defines a type system that has a finite representation. This abstraction discards information by increasing the set associated with a variable (that is, making the set less precise) until it matches a type. This third model can be implemented with standard iterative data flow analysis techniques. This chapter assumes that an Icon program consists of a single procedure and that all invocations are to built-in functions. It also assumes that there are no co-expressions beyond the main co-expression. See Chapter 19 for information on how to extend the abstractions to multiple procedures and multiple co-expressions. \section{Collecting Semantics} The collecting semantics of an Icon program is defined in terms of a \textit{flow graph} of the program. A flow graph is a directed graph used to represent the flow of control in a program. Nodes in the graph represent the executable primitives in the program. An edge exists from node \textbf{A} to node \textbf{B} if it is possible for execution to pass directly from the primitive represented by node \textbf{A} to the primitive represented by node \textbf{B}. Cousot and Cousot [.absintrp.] prove that the collecting semantics of a program can be represented as the least fixed point of a set of equations defined over the edges of the program's flow graph. These equations operate on sets of environments. For an example of a flow graph, consider the Icon program \goodbreak \begin{iconcode} procedure main()\\ \>every write(1 to 3)\\ end\\ \end{iconcode} \noindent The diagram below on the left shows the abstract syntax tree for this procedure, including the implicit \texttt{fail} at the end of the procedure. The invoke node in the syntax tree represents procedure invocation. Its first argument must evaluate to the procedure to be invoked; in this case the first argument is the global variable \texttt{write}. 
The rest of the arguments are used as the arguments to the procedure. \texttt{pfail} represents procedure failure (as opposed to expression failure within a procedure). Nodes corresponding to operations that produce values are numbered for purposes explained below. A flow graph can be derived from the syntax tree. This is shown on the right. \begin{figure}[htb] %% \begin{picture}(400,250) %% %\put(0,0){\graphpaper{40}{25}} %% \put(0,0){\includegraphics[width=2.4in,height=3.4in]{kw/figure3-1.png}} %% \put(200,20){\includegraphics[width=2.2in,height=3.1in]{kw/figure3-2.png}} %% \end{picture} \begin{center} \begin{tikzpicture} [draw,thick,font=\small\tt, % define a "curved box" style level distance=12mm, cbox/.style={rounded rectangle, rounded rectangle arc length=90, minimum width=10mm, draw}, every label/.style={font=\small\it} ] \node (lhs) [draw,shape=rounded rectangle, rounded rectangle arc length=180] {procedure main} child { node [cbox] {;} child {node [cbox] {every} child {node[cbox,label=left:1] {invoke} child { node[cbox, label=left:2] {write}} child[missing] child { node[cbox, label=left:3] {to} child {node[cbox, label=left:4] {1}} child {node[cbox, label=left:5] {3}} } } } child[missing] child {node [cbox] {pfail}} }; \begin{scope}[>=latex, edge from parent/.style={draw,arrows=-latex}] \node(rhs) [right=5cm, draw,shape=rounded rectangle, rounded rectangle arc length=180, ] {procedure main} child { node [cbox,label=left:2] {write} child {node[cbox,label=left:4] {1} child {node[cbox,label=left:5] {3} child {node (to) [cbox, label=left:3] {to} child [missing] child[missing] child { node (invoke) [cbox, label=left:1] {invoke}} child [missing] child { node [cbox] {pfail}} } } } }; %\draw[blue] (current bounding box.south west) rectangle (current bounding box.north east); \draw[->] (invoke.south) to[out=225, in=135,distance=3cm] (to.north west); %\draw[red] (current bounding box.south west) rectangle (current bounding box.north east); \end{scope} \end{tikzpicture} \vspace*{-2cm} % The last draw command's control points extend the bounding box too far. \end{center} \caption{Syntax tree and flow graph for a simple procedure} \end{figure} The node labeled procedure main is the \textit{start node} for the procedure; it performs any necessary initializations to establish the execution environment for the procedure. The edge from \texttt{invoke} to \texttt{to} is a resumption path induced by the control structure \texttt{every}. The path from \texttt{to} to \texttt{pfail} is the failure path for \texttt{to}. It is a forward execution path rather than a resumption path because the compound expression (indicated by \texttt{;}) limits backtracking out of its left-hand sub-expression. Chapter 19 describes how to determine the edges of the flow graph for an Icon program. Both the standard semantics and the abstract semantics must deal with the intermediate results of expression evaluation. A temporary-variable model is used because it is more convenient for this analysis than a stack model. This decision is unrelated to the use of a temporary-variable model in the compiler. This analysis uses a trivial assignment of temporary variables to intermediate results. Temporary variables are not reused. Each node that produces a result is assigned some temporary variable \textit{ri} in the environment. 
Assuming that temporary variables are assigned to the example according to the node numbering, the \texttt{to} operation has the effect of \iconline{\>\>\ttit{r3} := \ttit{r4} to \ttit{r5} } \noindent Expressions that represent alternate computations must be assigned the same temporary variable, as in the following example for the subexpression \texttt{x := ("a" {\textbar} "b")}. The syntax tree below on the left and the flow graph are shown on the right. %-% \begin{picture}(400,180) %-% %\put(0,0){\graphpaper{40}{18}} %-% \put(0,0){\includegraphics[width=2.2492in,height=2.2398in]{kw/figure3-3.png}} %-% \put(200,0){\includegraphics[width=2.4154in,height=2.3in]{kw/figure3-4.png}} %-% \end{picture} \begin{figure}[htb] \begin{center} \begin{tikzpicture} [draw,thick,font=\small\tt, level distance=12mm, % define a "curved box" style cbox/.style={rounded rectangle, rounded rectangle arc length=90, minimum width=10mm, draw}, every label/.style={font=\small\it} ] \node (lhs) {} child { node [cbox,label=left:1] {:=} child { node[cbox,label=left:2] {x}} child[missing] child { node[cbox] {alt} child { node[cbox, label=left:3] {"a"}} child [label=right:lab] { node[cbox, label=left:3] (bottom) {"b"}} } }; % The flow graph part of the diagram is best drawn "bottom up" \begin{scope}[edge from parent/.style={draw,arrows=latex-}, grow'=up] \node(rhs) at ($(bottom.south) + (5cm,-1cm)$) {} child { node [cbox,label=left:1] {:=} child { node[cbox,label=left:3] {"a"} child {node[cbox, label=left:2] {x} child{}} } child[missing] child { node[cbox,label=left:3] {"b"} child {}} }; \end{scope} %\draw (current bounding box.south west) rectangle (current bounding box.north east); \end{tikzpicture} \end{center} \caption{syntax tree and flow graph for \texttt{x := ("a" {\textbar} "b")}} \end{figure} The \texttt{if} and \texttt{case} control structures are handled similarly. In addition to temporary variables for intermediate results, some generators may need additional temporary variables to hold internal states during suspension. It is easy to devise a scheme to allocate them where they are needed; details are not presented here. The syntax tree is kept during abstract interpretation and used to determine the temporary variables associated with an operation and its operands. The equations that determine the collecting semantics of the program are derived directly from the standard semantics of the language. The set of environments on an edge of the flow graph is related to the sets of environments on edges coming into the node at the head of this edge. This relationship is derived by applying the meaning of the node (in the standard semantics) to each of the incoming environments. It requires a rather complex environment to capture the full operational semantics (and collecting semantics) of a language like Icon. For example, the environment needs to include a representation of the external file system. However, later abstractions only use the fact that the function \texttt{read} produces strings. This discussion assumes that it is possible to represent the file system in the environment, but does not give a representation. Other complexities of the environment are discussed later. For the moment, examples only show the bindings of variables to unstructured (atomic) values. As an example of environments associated with the edges of a flow graph, consider the assignment at the end of the following code fragment. The comments in the if expression are assertions that are assumed to hold at those points in the example. 
\goodbreak \begin{iconcode} \>if x = 7 then \{\\ \>\>...\\ \>\>\# x is 7 and y is 3\\ \>\>\}\\ \>else \{\\ \>\>...\\ \>\>\# (x is null and y is 1) or (x is "abc" and y is 2)\\ \>\>\}\\ \>x := y + 2\\ \end{iconcode} Because of the preceding \texttt{if} expression, there are two paths reaching the assignment. The diagram below shows the flow graph and accompanying environments for the expression; the diagram ignores the fact that the assignment expression requires several primitive operations to implement. %-% \includegraphics[width=6.0in,height=2.1in]{kw/figure3-5.png} \begin{figure}[htb] \begin{center} % This tree is best drawn upside down \begin{tikzpicture}[draw, edge from parent/.style={draw,arrows=latex-}, grow'=up, thick,font=\small\tt, level distance=2cm ] \node (root) {} child { node [rounded rectangle, draw] {x := y + 2} { child { edge from parent coordinate (inL)} child[missing] child { edge from parent coordinate (inR)} } edge from parent coordinate (out) }; \node[anchor=east] at ([xshift=-0.3cm]inL) {\{~[x=null,y=1]~,~[x="abc",y=2]~\}}; \node[anchor=west] at ([xshift=0.3cm]inR) {\{~[x=7,y=3]~\}}; \node[anchor=east] at ([xshift=-0.3cm]out) {\{~[x=3,y=1]~,~[x=4,y=2]~,~[x=5,y=3]~\}}; \end{tikzpicture} \end{center} \caption{A flow graph for \texttt{x := y + 2}} \end{figure} For a conditional expression, an incoming environment is propagated to the path that it would cause execution to take in the standard semantics. This requires distinguishing the paths to be taken on failure (backtracking paths) from those to be taken on success. The following diagram shows an example of this. %-%{\centering \includegraphics[width=5.3992in,height=2.2299in]{kw/figure3-6.png} \par} \begin{figure}[htb] \begin{center} \begin{tikzpicture}[ draw, edge from parent/.style={draw,arrows=-latex}, thick,font=\small\tt, level distance=2cm, cbox/.style={rounded rectangle, rounded rectangle arc length=90, minimum width=10mm, draw} ] \node (root) {} child { node [cbox] {x < y} { child { edge from parent coordinate (outL)} child[missing] child { edge from parent coordinate (outR)} } edge from parent coordinate (in) }; \node[anchor=east] at ([xshift=-0.3cm]in) {\{~[x=1,y=2]~,~[x=1,y=1]~,~[x=3,y=100]~\}}; \node[anchor=east] at ([xshift=-0.5cm,yshift=-0.2cm]outL) {\{~[x=1,y=2]~,~[x=3,y=100]~\}}; \node[anchor=east] at ([yshift=0.5cm]outL) {\it success}; \node[anchor=west] at ([xshift=0.5cm,yshift=-0.2cm]outR) {\{~[x=1,y=1]~\}}; \node[anchor=west] at ([yshift=0.5cm]outR) {\it failure}; \end{tikzpicture} \end{center} \caption{A flow graph for a conditional expression} \end{figure} In general there may be several possible backtracking paths. The environments in the standard and collecting semantics need to include a stack of current backtracking points and control flow information, and the flow graph needs instructions to maintain this stack. The Icon interpreter system described in Part I is an example of how this information can be maintained. However, the first abstraction to the collecting semantics eliminates the need for this information, so the information is not presented in detail here. \section*{Model 1: Eliminating Control Flow Information} \addcontentsline{toc}{section}{Model 1: Eliminating Control Flow Information} The first abstraction involves taking the union of the environments propagated along all the failure paths from a node in the collecting semantics and propagating that union along each of the failure paths in the new abstraction. 
This abstraction eliminates the stack of backtracking points from the environment. A more formal definition for this model requires taking a closer look at Icon data values, especially those values with internal structure. In order to handle Icon data objects with pointer semantics, an environment needs more than variable bindings. This fact is important to type inferencing. The problem is handled by including two components in the environment. The first is the \textit{store}, which maps variables to values. Variables include \textit{named} variables, \textit{temporary} variables, and \textit{structure} variables. Named variables correspond to program identifiers. Temporary variables hold intermediate results as discussed above. Structure variables are elements of structures such as lists. Note that the sets of named variables and temporary variables are each finite (based on the assumption that a program consists of a single non-recursive procedure; as mentioned earlier, this assumption is removed in Chapter 19), but for some non-terminating programs, the set of structure variables may be infinite. \textit{Program} variables include both named variables and structure variables but not temporary variables. Values include atomic data values such as integers, csets, and strings. They also include \textit{pointers} that reference objects with pointer semantics. In addition to the values just described, temporary variables may contain references to program variables. These \textit{variable references} may be used by assignments to update the store or they may be dereferenced by other operations to obtain the values stored in the variables. The second part of the environment is the \textit{heap}. It maps pointers to the corresponding data objects (this differs from the heap in the Icon implementation in that that heap also contains some data objects that do not have pointer semantics). For simplicity, the only data type with pointer semantics included in this discussion is the list. A list is a partial mapping from integers to variables. Representing other data types with pointer semantics is straightforward; this is discussed in Chapter 19. The first abstraction is called Model 1. The notations envir$_{[n]}$, store$_{[n]}$, and heap$_{[n]}$ refer to the sets of possible environments, stores, and heaps respectively in model $n$. For example, envir$_{[1]}$ is the set of possible environments in the first abstraction. In the following set of definitions, $X \times Y$ is the set of ordered pairs where the first value in the pair is from $X$ and the second value is from $Y$. $ X \rightarrow Y$ is the set of partial functions from $X$ to $Y$. The definition of the set of possible environments for model 1 is \goodbreak \begin{specialcode}{} \>envir$_{[1]}$ = store$_{[1]}$ $\times$ heap$_{[1]}$\\ \>store$_{[1]}$ = variables $\rightarrow$ values\\ \>values = integers $\cup$ strings $\cup$ \ldots $\cup$ pointers $\cup$ variables\\ \>heap$_{[1]}$ = pointers \textrm{${\rightarrow}$} lists,~% where lists = integers $\rightarrow$ variables\\ \end{specialcode} For example, the expression \iconline{ \>a := ["abc"] } \noindent creates a list of one element whose value is the string \texttt{abc} and assigns the list to the variable \texttt{a}. Let $p_1$ be the pointer to the list and let $v_1$ be the (anonymous) variable within the list. 
The resulting environment, e $\in$ envir$_{[1]}$, might be \goodbreak \begin{specialcode}{} \>e = $(s,h)$, where $s \in $ store$_{[1]}$, $h \in $ heap$_{[1]}$\\ \>$s($\texttt{a}$) = p_1$\\ \>$s(v_1) = $ "\texttt{abc}"\\ \\ \>$h(p_1) = L_1$, where $L_1 \in$ lists\\ \\ \>$L_1(1) = v_1$\\ \end{specialcode} \noindent If the statement \iconline{ \>a[1] := "xyz" } \noindent is executed, the subscripting operation dereferences \texttt{a} producing $p_1$, then uses the heap to find $L_1$, which it applies to 1 to produce the result $v_1$. The only change in the environment at this point is to temporary variables that are not shown. The assignment then updates the store, producing \goodbreak \begin{iconcode} \>$e_1 = (s_1 , h)$\\ \>$s_1$(\texttt{a})$ = p_1$\\ \>$s_1(v_1) =$ "xyz"\\ \end{iconcode} \noindent Assignment does not change the heap. On the other hand, the expression \iconline{ \ \ put(a, "xyz") } \noindent adds the string \texttt{xyz} to the end of the list; if it is executed in the environment $e$, it alters the heap along with adding a new variable to the store. \goodbreak \begin{specialcode}{} \>$e_1 = (s_1 , h_1$)\\ \>$s_1($\texttt{a}$) = p_1$\\ \>$s_1(v_1) =$ "\texttt{abc}"\\ \>$s_1(v_2) =$ "\texttt{xyz}"\\ \>$h_1(p_1) = L_2$\\ \>$L_2(1) = v_1$\\ \>$L_2(2) = v_2$\\ \end{specialcode} If a formal model were developed for the collecting semantics, it would have an environment similar to the one in Model 1. However, it would need a third component with which to represent the backtracking stack. \section*{Model 2: Decoupling Variables} \addcontentsline{toc}{section}{Model 2: Decoupling Variables} The next approximation to Icon semantics, Model 2, takes all the values that a variable might have at a given program point and gathers them together. In general, a variable may have the same value in many environments, so this, in some sense, reduces the amount of space required to store the information (though the space may still be unbounded). The ``cost'' of this reduction of storage is that any information about relationship of values between variables is lost. Model 2 is also defined in terms of environments, stores, and heaps, although they are different from those of Model 1. A store in Model 2 maps sets of variables to sets of values; each resulting set contains the values associated with the corresponding variables in environments in Model 1. Similarly, a heap in Model 2 maps sets of pointers to sets of lists; each of these sets contains the lists associated with the corresponding pointers in environments in Model 1. An environment in Model 2 contains a store and a heap, but unlike in Model 1, there is only one of these environments associated with each program point. The environment is constructed so that it effectively ``contains'' the environments in the set associated with the point in Model 1. The definition of Model 2 is \goodbreak \begin{specialcode}{} \>envir$_{[2]}$ = store$_{[2]} \times $ heap$_{[2]}$\\ \>store$_{[2]}$ = $2^{\textrm{variables}} \rightarrow 2^{\textrm{values}}$ \\ \>heap$_{[2]}$ = $2^{\textrm{pointers}} \rightarrow 2^{\textrm{lists}}$ \\ \end{specialcode} In Model 1, operations produce elements from the set \textit{values}. In Model 2, operations produce subsets of this set. It is in this model that \texttt{read} is taken to produce the set of all strings and that the existence of an external file system can be ignored. Suppose a program point is annotated with the set containing the following two environments from Model 1. 
\goodbreak \begin{iconcode} \> $e_1,e_2 \in$ envir$_{[1]}$\\ \> $e_1 = (s_1, h_1)$\\ \> $s_1($\texttt{x}$) = 1$\\ \> $s_1($\texttt{y}$) = p_1$\\ \> $h_1(p_1) = L_1$\\ \\ \> $e_2 = (s_2, h_2)$\\ \> $s_2($\texttt{x}$) = 2$\\ \> $s_2($\texttt{y}$) = p_1$\\ \> $h_2(p_1) = L_2$\\ \end{iconcode} \noindent Under Model 2 the program point is annotated with the single environment $\hat{e} {\in}$ envir$_{[2]}$, where \goodbreak \begin{iconcode} \> $\hat{e} = (\hat{s},\hat{h})$\\ \> $\hat{s}(\{$\texttt{x}\}$) = \{1,2\}$\\ \> $\hat{s}(\{$\texttt{y}\}$) = \{p_1\}$\\ \> $\hat{s}(\{$\texttt{x}, \texttt{y}\}$) = \{1, 2, p_1\}$\\ \> $\hat{h}(\{p_1\}) = \{L_1, L_2\}$\\ \end{iconcode} \noindent Note that a store in Model 2 is distributive over union. That is, $\hat{s}(X \cup Y) = \hat{s}(X) \cup \hat{s}(Y)$ \noindent so listing the result of $\hat{s}(\{$\texttt{x}, \texttt{y}$\})$ is redundant. A heap in Model 2 also is distributive over union. In going to Model 2 information is lost. In the last example, the fact that \texttt{x = 1} is paired with $p_1 =L_1$ and \texttt{x = 2} is paired with $p_1 = L_2$ is not represented in Model 2. Just as \texttt{read} is extended to produce a set of values, so are all other operations. These "extended" operations are then used to set up the equations whose solution formally defines Model 2. This extension is straightforward. For example, the result of applying a unary operator to a set is the set obtained by applying the operator to each of the elements in the operand. The result of applying a binary operator to two sets is the set obtained by applying the operator to all pairs of elements from the two operands. Operations with more operands are treated similarly. For example \begin{eqnarray*} \{1, 3, 5\} + \{2, 4\} & = & \{1 + 2, 1 + 4, 3 + 2, 3 + 4, 5 + 2, 5 + 4\}\\ & = & \{3, 5, 5, 7, 7, 9\}\\ & = & \{3, 5, 7, 9\}\\ \end{eqnarray*} The loss of information mentioned above affects the calculation of environments in Model 2. Suppose the addition in the last example is from \iconline{\>z := x + y } \noindent and that Model 1 has the following three environments at the point before the calculation \goodbreak \begin{iconcode} \>[x = 1, y = 2, z = 0]\\ \>[x = 3, y = 2, z = 0]\\ \>[x = 5, y = 4, z = 0]\\ \end{iconcode} After the calculation the three environments will be \goodbreak \begin{iconcode} \>[x = 1, y = 2, z = 3]\\ \>[x = 3, y = 2, z = 5]\\ \>[x = 5, y = 4, z = 9]\\ \end{iconcode} If these latter three environments are translated into an environment of Model 2, the result is \iconline{ \>[x = \{1, 3, 5\}, y = \{2, 4\}, z = \{3, 5, 9\}] } However, when doing the computation using the semantics of + in Model 2, the value for \texttt{z} is \texttt{\{3, 5, 7, 9\}}. The solution to the equations in Model 2 overestimates (that is, gives a conservative estimate for) the values obtained by computing a solution using Model 1 and translating it into the domain of Model 2. Consider the following code with respect to the semantics of assignment in Model 2. (Assume that the code is executed once, so only one list is created.) \goodbreak \begin{iconcode} \>x := [10, 20]\\ \>i := if read() then 1 else 2\\ \>x[i] := 30\\ \end{iconcode} After the first two assignments, the store maps \texttt{x} to a set containing one pointer and maps \texttt{i} to a set containing 1 and 2. The third assignment is not as straightforward. Its left operand evaluates to two variables; the most that can be said about one of these variables after the assignment is that it might have been assigned 30. 
If $(s, h)$ is the environment after the third assignment then \goodbreak \begin{specialcode}{} \>$s(\{$\texttt{x}$\}) = \{ p_1 \}$\\ \>$s(\{$\texttt{i}$\}) = \{1, 2\}$\\ \>$s(\{v_1\}) = \{10, 30\}$\\ \>$s(\{v_2\}) = \{20, 30\}$\\ \\ \>$h(\{p_1\}) = \{L_1\}$\\ \\ \>$L_1(1) = v_1$\\ \>$L_1(2) = v_2$\\ \end{specialcode} Clearly all assignments could be treated as \textit{weak updates} [.pntstr.], where a weak update is an update that may or may not take place. However, this would involve discarding too much information; assignments would only add to the values associated with variables and not replace the values. Therefore assignments where the left hand side evaluates to a set containing a single variable are treated as special cases. These are implemented as \textit{strong updates}. \section*{Model 3: A Finite Type System} \addcontentsline{toc}{section}{Model 3: A Finite Type System} The environments in Model 2 can contain infinite amounts of information, as in the program \goodbreak \begin{iconcode} \>x := 1\\ \>repeat x +:= 1\\ \end{iconcode} \noindent where the set of values associated with x in the loop consists of all the counting numbers. Because equations in Model 2 can involve arbitrary arithmetic, no algorithm can find the least fixed point of an arbitrary set of these equations. The final step is to impose a finitely representable type system on values. A type is a (possibly infinite) set of values. The type system presented here includes three classifications of basic types. The first classification consists of the Icon types without pointer semantics: integers, strings, csets, etc. The second classification groups pointers together according to the lexical point of their creation. This is similar to the method used to handle recursive data structures in Jones and Muchnick [.analrcsv.]. Consider the code \iconline{ \>every insert(x, [1 to 5]) } If this code is executed once, five lists are created, but they are all created at the same point in the program, so they all belong to the same type. The intuition behind this choice of types is that structures created at the same point in a program are likely to have components of the same type, while structures created at different points in a program may have components of different types. The third classification of basic types handles variable references. Each named variable and temporary variable is given a type to itself. Therefore, if \texttt{a} is a named variable, \texttt{\{a\}} is a type. Structure variables are grouped into types according to the program point where the pointer to the structure is created. This is not necessarily the point where the variable is created; in the following code, a pointer to a list is created at one program point, but variables are added to the list at different points \goodbreak \begin{iconcode} \ \ x := []\\ \ \ push(x, 1)\\ \ \ push(x ,2)\\ \end{iconcode} References to these variables are grouped into a type associated with the program point for \texttt{[]}, not the point for the corresponding push. 
If a program contains k non-structure variables and there are n locations where pointers can be created, then the basic types for the program are integer, string, ..., P\TextSubscript{1}, ..., P\TextSubscript{n}, V\TextSubscript{1}, ..., V\TextSubscript{n}, \{v\TextSubscript{1}\}, ..., \{v\TextSubscript{k}\} where P\TextSubscript{i} is the pointer type created at location i, V\TextSubscript{i} is the variable type associated with P\TextSubscript{i}, and v\TextSubscript{i} is a named variable or a temporary variable. Because programs are lexically finite they each have a finite number of basic types. The set of all types for a program is the smallest set that is closed under union and contains the empty set along with the basic types: \begin{specialcode}{} \>types = \{\{\}, integers, strings,...,~% (integers ${\cup}$ \ strings),...,~% (integers ${\cup}$ strings ${\cup}$ ... ${\cup}$ \{v\TextSubscript{k}\})\} \end{specialcode} Model 3 replaces the arbitrary sets of values of Model 2 by types. This replacement reduces the precision of the information, but allows for a finite representation and allows the information to be computed in finite time. In Model 3, both the store and the heap map types to types. This store is referred to as the \textit{type store}. The domain of type store is \textit{variable types}, that is, those types whose only values are variable references. Similarly, the domain of the heap is \textit{pointer types}. Its range is the set types containing only structure variables. A set of values from Model 2 is converted to a type in Model 3 by mapping that set to the smallest type containing it. For example, the set \iconline{ \>\{1, 4, 5, "23", "0"\} } \noindent is mapped to \begin{specialcode}{} \>integer ${\cup}$ string\\ \end{specialcode} \noindent The definition of envir$_{[3]}$ is \goodbreak \begin{specialcode}{} \>envir$_{[3]} = $ store$_{[3]} \times $ heap$_{[3]}$\\ \>store$_{[3]} = $ variable-types $\rightarrow $ types\\ \>heap$_{[3]} = $ pointer-types $\rightarrow $ structure-variable-types\\ \>types ${\subseteq}$ 2\textsuperscript{values}\\ \>variable-types ${\subseteq}$ types\\ \>structure-variable-types ${\subseteq}$ variable-types\\ \>pointer-types ${\subseteq}$ types\\ \end{specialcode} There is exactly one variable type for each pointer type in this model. The heap simply consists of this one-to-one mapping; the heap is of the form {\ttfamily\mdseries \ \ \ $h$( P\TextSubscript{i} ) = V\TextSubscript{i}} This mapping is invariant over a given program. Therefore, the type equations for a program can be defined over store$_{[3]}$ rather than envir$_{[3]}$ with the heap embedded within the type equations. Suppose an environment from Model 2 is \goodbreak \begin{specialcode}{} \> $e \in $ envir$_{[2]}$\\ \> $e = (s, h)$\\ \\ \> $s(\{$\texttt{a}$\}) = \{ p_1 , p_2\}$\\ \> $s(\{v_1\}) = \{1, 2\}$\\ \> $s(\{v_2\}) = \{1\}$\\ \> $s(\{v_3\}) = \{12.03\}$\\ \\ \> $h(\{p_1\}) = \{L_1, L_2\}$\\ \> $h(\{p_2\}) = \{L_3\}$\\ \\ \> $L_1(1) = v_1$\\ \\ \> $L_2(1) = v_1$\\ \> $L_2(2) = v_2$\\ \\ \> $L_3(1) = v_3$\\ \end{specialcode} Suppose the pointers p\TextSubscript{1} and p\TextSubscript{2} are both created at program point 1. Then the associated pointer type is P\TextSubscript{1} and the associated variable type is V\TextSubscript{1}. 
The corresponding environment in Model 3 is \goodbreak \begin{specialcode}{} \>$\hat{e} \in $ envir$_{[3]}$\\ \>$\hat{e} = (\hat{s},\hat{h})$\\ \>$\hat{s}(\{$\texttt{a}$\}) = $ P$_1$\\ \>$\hat{s}($V$_1) = $ integer $\cup$ real\\ \>$\hat{h}($P$_1) = $ V$_1$\\ \end{specialcode} The collecting semantics of a program establishes a set of (possibly) recursive equations between the sets of environments on the edges of the program's flow graph. The collecting semantics of the program is the least fixed point of these equations in which the set on the edge entering the start state contains all possible initial environments. Similarly, type inferencing establishes a set of recursive equations between the type stores on the edges of the flow graph. The least fixed point of these type inferencing equations is computable using iterative methods. This is discussed in Chapter 19. The fact that these equations have solutions is due to the fact that the equations in the collecting semantics have a solution and the fact that each abstraction maintains the ``structure'' of the problem, simply discarding some details. Chapter 19 also extends type inferencing to handle the entire Icon language. Chapter 22 uses the information from type inferencing to optimize the generated code.
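As an illustration of the iterative computation mentioned above, the following sketch shows how a least fixed point of a set of type equations can be computed when types are represented as finite sets of basic type names. The sketch is written in Python purely for exposition; it is not the implementation described in Chapter 19, and the function name \texttt{solve\_type\_equations} together with the dictionary representation of a type store are assumptions made only for this example.
\begin{verbatim}
# Hypothetical sketch: a type is a frozenset of basic type names, a type
# store maps variable names to types, and each node of the flow graph
# contributes a monotone transfer function from stores to stores.
def solve_type_equations(variables, transfer_functions):
    store = {v: frozenset() for v in variables}       # start at bottom
    changed = True
    while changed:                                    # iterate to a fixed point
        changed = False
        for f in transfer_functions:
            for var, ty in f(store).items():
                merged = store[var] | ty              # union is the join
                if merged != store[var]:
                    store[var] = merged
                    changed = True
    return store

# Example: x := 1; repeat x +:= 1 -- the infinite value set {1, 2, 3, ...}
# of Model 2 collapses to the finite type 'integer' of Model 3.
equations = [
    lambda s: {"x": frozenset({"integer"})},           # x := 1
    lambda s: {"x": s["x"] | frozenset({"integer"})},  # x +:= 1
]
print(solve_type_equations(["x"], equations))          # {'x': frozenset({'integer'})}
\end{verbatim}
Because a program has only finitely many basic types and each step can only add basic types to the store, the iteration must terminate; this mirrors the argument that the type inferencing equations have a computable least fixed point.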
{ "alphanum_fraction": 0.7280884756, "avg_line_length": 39.9849246231, "ext": "tex", "hexsha": "3c58d28161014aafbf02c1b5b4047e24a58324fb", "lang": "TeX", "max_forks_count": 16, "max_forks_repo_forks_event_max_datetime": "2022-03-01T06:01:00.000Z", "max_forks_repo_forks_event_min_datetime": "2019-10-14T04:32:36.000Z", "max_forks_repo_head_hexsha": "df79234dc1b8a4972f3908f601329591c06bd141", "max_forks_repo_licenses": [ "BSD-2-Clause" ], "max_forks_repo_name": "jschnet/unicon", "max_forks_repo_path": "doc/ib/p2-typeTheory.tex", "max_issues_count": 83, "max_issues_repo_head_hexsha": "29f68fb05ae1ca33050adf1bd6890d03c6ff26ad", "max_issues_repo_issues_event_max_datetime": "2022-03-22T11:32:35.000Z", "max_issues_repo_issues_event_min_datetime": "2019-11-03T20:07:12.000Z", "max_issues_repo_licenses": [ "BSD-2-Clause" ], "max_issues_repo_name": "MatthewCLane/unicon", "max_issues_repo_path": "doc/ib/p2-typeTheory.tex", "max_line_length": 94, "max_stars_count": 35, "max_stars_repo_head_hexsha": "29f68fb05ae1ca33050adf1bd6890d03c6ff26ad", "max_stars_repo_licenses": [ "BSD-2-Clause" ], "max_stars_repo_name": "MatthewCLane/unicon", "max_stars_repo_path": "doc/ib/p2-typeTheory.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-01T06:00:40.000Z", "max_stars_repo_stars_event_min_datetime": "2019-11-29T13:19:55.000Z", "num_tokens": 11012, "size": 39785 }
\documentclass{article} %% \usepackage{indentfirst} \usepackage{fullpage} \usepackage{html} \begin{document} \title{Annotating Java Class Files with \\ Array Bounds Check and Null Pointer Check Information} \author{Feng Qian (\htmladdnormallink{[email protected]} {mailto:[email protected]})} \date{\today} \maketitle This note explains how to use Soot annotation options to add array bounds check and null pointer check attributes to a class file and how to use these attributes in a JIT or ahead-of-time compiler. \section{Array References and Object References} Java requires array bounds checks when accessing arrays, and null pointer checks when accessing objects. Array bounds checks are implemented at the virtual machine level by inserting comparison instructions before accessing an array element. Most of operating systems can raise a hardware exception when a bytecode accesses a null pointer, so the nullness check on an object reference is free at most of the time. However, some bytecodes, like the {\it invokespecial} and {\tt athrow} instructions, do need explicit comparison instructions to detect null pointers. Both of these safety checking mechanisms do cause heavy runtime overhead. Soot provides static analyses for detecting safe array and object accesses in a method. These analyses mark array and object reference bytecodes as either safe or unsafe. The results of these analyses are encoded into the class file as attributes, which can then be understood by an interpreter or JIT compiler. If a bytecode is marked as safe in its attribute, the associated comparison instructions can be eliminated. This can speed up the execution of Java applications. Our process of encoding class files with attributes is called {\em annotation}. Soot can be used as a compiler framework to support any attributes you would like to define; they can then be encoded into the class file. The process of adding new analyses and attributes is documented in ``Adding attributes to class files via Soot''. % there is a latex2html command that lets you provide a hyperlink. % See the other tutorials. \section{Annotation options in Soot} \subsection{Description of new options} Soot has new command-line options {\tt-annot-nullpointer} and {\tt-annot-arraybounds} to enable the phases required to emit null pointer check and array bounds check annotations, respectively. Soot has some phase options to configure the annotation process. These phase options only take effect when annotation is enabled. Note that the array bounds check analysis and null pointer check analysis constitute two different phases, but that the results are combined and stored in the same attribute in the class files. The null pointer check analysis has the phase name ``{\em jap.npc}''. It has one phase option (aside from the default option {\em enabled}). \begin{description} \item[-p jap.npc only-array-ref]\ \\ By default, all bytecodes that need null pointer checks are annotated with the analysis result. When this option is set to true, Soot will annotate only array reference bytecodes with null pointer check information; other bytecodes, such as {\tt getfield} and {\tt putfield}, will not be annotated. \end{description} Soot also has phase options for the array bounds check analysis. These options affect three levels of analyses: intraprocedural, class-level, and whole-program. The array bounds check analysis has the phase name ``{\em jap.abc}''. 
If the whole-program analysis is required, an extra phase ``{\em wjap.ra}'' for finding rectangular arrays is needed. This phase can also be enabled with phase options. By default, our array bounds check analysis is intraprocedural, since it only examines local variables. This is fast, but conservative. Other options can improve the analysis result; however, it will usually take longer to carry out the analysis, and some options assume that the application is single-threaded. \begin{description} \item[-p jap.abc with-cse]\ \\ The analysis will consider common subexpressions. For example, consider the situation where {\tt r1} is assigned {\tt a*b}; later, {\tt r2} is assigned {\tt a*b}, where neither {\tt a} nor {\tt b} has been changed between the two statements. The analysis can conclude that {\tt r2} has the same value as {\tt r1}. Experiments show that this option can improve the result slightly. \item[-p jap.abc with-arrayref]\ \\ With this option enabled, array references can be considered as common subexpressions; however, we are more conservative when writing into an array, because array objects may be aliased. NOTE: We also assume that the application is single-threaded or that the relevant code is in a synchronized block. That is, an array element may not be changed by other threads between two array references. % see my thesis for an example of what to do when you have contention! -plam \item[-p jap.abc with-fieldref]\ \\ The analysis treats field references (static and instance) as common subexpressions. The restrictions from the `{\tt with-arrayref}' option also apply. \item[-p jap.abc with-classfield]\ \\ This option makes the analysis work on the class level. The algorithm analyzes `final' or `private' class fields first. It can recognize the fields that hold array objects with constant length. In an application using many array fields, this option can improve the analysis results dramatically. \item[-p jap.abc with-all]\ \\ A macro. Instead of typing a long string of phase options, this option will turn on all options of the phase ``{\em jap.abc}''. \item[-p jap.abc with-rectarray, -p wjap.ra with-wholeapp]\ \\ These two options are used together to make Soot run the whole-program analysis for rectangular array objects. This analysis is based on the call graph, and it usually takes a long time. If the application uses rectangular arrays, these options can improve the analysis result. \end{description} \subsection{Examples} Annotate the benchmark in class file mode with both analyses. \begin{verbatim} java soot.Main -annot-nullpointer -annot-arraybounds spec.benchmarks._222_mpegaudio.Main \end{verbatim} The options for rectangular arrays should be used in application mode. For example: \begin{verbatim} java soot.Main --app -annot-arraybounds -p wjap.ra with-wholeapp -p jap.abc with-all spec.benchmarks._222_mpegaudio.Main \end{verbatim} The following command annotates only the array reference bytecodes. \begin{verbatim} java soot.Main -annot-nullpointer -annot-arraybounds -p jap.npc only-array-ref spec.benchmarks._222_mpegaudio.Main \end{verbatim} \section{Using attributes in the Virtual Machine} The array bounds check and null pointer check information is encoded in a single attribute in a class file. The attribute is called {\tt ArrayNullCheckAttribute}. When a VM reads in the class file, it can use the attribute to avoid generating comparison instructions for the safe bounds and nullness checks.
All array reference bytecodes, such as {\em ?aload, ?store} will be annotated with bounds check information. Bytecodes that need null pointer check are listed below: \begin{verbatim} ?aload ?astore getfield putfield invokevirtual invokespecial invokeinterface arraylength monitorenter monitorexit athrow \end{verbatim} The attributes in the class file are organized as a table. If a method has been annotated, it will have an {\tt ArrayNullCheckAttribute} attribute on its {\tt Code\_attribute}. The data structure is defined as: \begin{verbatim} array_null_check_attribute { u2 attribute_name_index; u4 attribute_length; u3 attribute[attribute_length/3]; } \end{verbatim} The attribute data consist of 3-byte entries. Each entry has the first two bytes indicating the PC of the bytecode it belongs to; the third byte is used to represent annotation information. \begin{verbatim} soot_attr_entry { u2 PC; u1 value; } \end{verbatim} Entries are sorted by PC in ascending order when written into the class file. The right-most two bits of the `{\em value}' byte represent upper and lower bounds information. The third bit from right is used for nullness annotation. Other bits are not used and set to zero. The bit value `1' indicates the check is needed, and 0 represents a known-to-be-safe access. In general, only when both lower and upper bounds are safe can the check instructions be eliminated. However, sometimes this depends on the VM implementation. \begin{verbatim} 0 0 0 0 0 N U L N : nullness check U : upper bounds check L : lower bounds check \end{verbatim} For example, the attribute data should be interpreted as: \begin{verbatim} 0 0 0 0 0 1 x x // need null check 0 0 0 0 0 0 x x // no null check // x x represent array bound check. 0 0 0 0 0 0 0 0 // do not need null check or array bounds check 0 0 0 0 0 1 0 0 // need null check, but not array bounds check \end{verbatim} \section*{Other information} The detailed annotation process is described in our technical report. The array bounds check analysis algorithm will show up in another technical report. There is a tutorial describing how to develop other annotation attributes using Soot. \section*{Change log} \begin{itemize} \item October 2, 2000: Initial version. \end{itemize} \end{document}
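As an illustration (not part of Soot or of any particular VM), the following Python sketch decodes the 3-byte entries described above. It assumes that {\tt data} holds only the entry bytes, i.e.\ that the {\tt attribute\_name\_index} and {\tt attribute\_length} fields have already been read, and that multi-byte values are big-endian as usual in class files.
\begin{verbatim}
# Hypothetical decoder for the ArrayNullCheckAttribute entry layout:
# u2 PC (big endian) followed by u1 value with bits 0 0 0 0 0 N U L.
def decode_array_null_check(data: bytes):
    entries = []
    for i in range(0, len(data), 3):
        pc = (data[i] << 8) | data[i + 1]        # u2 PC
        value = data[i + 2]                      # u1 annotation byte
        entries.append({
            "pc": pc,
            "null_check": bool(value & 0b100),   # N: third bit from the right
            "upper_check": bool(value & 0b010),  # U: second bit from the right
            "lower_check": bool(value & 0b001),  # L: rightmost bit
        })
    return entries

# Example: PC 7 needs a null check but no bounds checks, PC 12 needs nothing.
print(decode_array_null_check(bytes([0, 7, 0b100, 0, 12, 0b000])))
\end{verbatim}
A JIT or ahead-of-time compiler would perform the equivalent bit tests when deciding whether the comparison instructions for a given PC can be omitted.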
{ "alphanum_fraction": 0.7559307987, "avg_line_length": 36.7034220532, "ext": "tex", "hexsha": "128984a12d6d5e7919059780ef6b681364d87fab", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2022-03-14T19:58:38.000Z", "max_forks_repo_forks_event_min_datetime": "2022-03-14T19:58:38.000Z", "max_forks_repo_head_hexsha": "23de49765326f09f642b7097b7334facec0e96c3", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "UCLA-SEAL/JShrink", "max_forks_repo_path": "code/jshrink/soot/tutorial/useannotation/useannotation.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "23de49765326f09f642b7097b7334facec0e96c3", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "UCLA-SEAL/JShrink", "max_issues_repo_path": "code/jshrink/soot/tutorial/useannotation/useannotation.tex", "max_line_length": 92, "max_stars_count": 1, "max_stars_repo_head_hexsha": "23de49765326f09f642b7097b7334facec0e96c3", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "UCLA-SEAL/JShrink", "max_stars_repo_path": "code/jshrink/soot/tutorial/useannotation/useannotation.tex", "max_stars_repo_stars_event_max_datetime": "2019-12-07T16:13:03.000Z", "max_stars_repo_stars_event_min_datetime": "2019-12-07T16:13:03.000Z", "num_tokens": 2255, "size": 9653 }
\documentclass[a4paper,twoside]{article} \usepackage{epsfig} \usepackage{subfigure} \usepackage{calc} \usepackage{amssymb} \usepackage{amstext} \usepackage{amsmath} \usepackage{amsthm} \usepackage{multicol} \usepackage{pslatex} \usepackage{apalike} \usepackage{SCITEPRESS} % Please add other packages that you may need BEFORE the SCITEPRESS.sty package. \subfigtopskip=0pt \subfigcapskip=0pt \subfigbottomskip=0pt \begin{document} \title{Authors' Instructions \subtitle{Preparation of Camera-Ready Contributions to SCITEPRESS Proceedings} } \author{\authorname{First Author Name\sup{1}, Second Author Name\sup{1} and Third Author Name\sup{2}} \affiliation{\sup{1}Institute of Problem Solving, XYZ University, My Street, MyTown, MyCountry} \affiliation{\sup{2}Department of Computing, Main University, MySecondTown, MyCountry} \email{\{f\_author, s\_author\}@ips.xyz.edu, t\[email protected]} } \keywords{The paper must have at least one keyword. The text must be set to 9-point font size and without the use of bold or italic font style. For more than one keyword, please use a comma as a separator. Keywords must be titlecased.} \abstract{The abstract should summarize the contents of the paper and should contain at least 70 and at most 200 words. The text must be set to 9-point font size.} \onecolumn \maketitle \normalsize \vfill \section{\uppercase{Introduction}} \label{sec:introduction} \noindent Your paper will be part of the conference proceedings therefore we ask that authors follow the guidelines explained in this example in order to achieve the highest quality possible \cite{Smith98}. Be advised that papers in a technically unsuitable form will be returned for retyping. After returned the manuscript must be appropriately modified. \section{\uppercase{Manuscript Preparation}} \noindent We strongly encourage authors to use this document for the preparation of the camera-ready. Please follow the instructions closely in order to make the volume look as uniform as possible \cite{Moore99}. Please remember that all the papers must be in English and without orthographic errors. Do not add any text to the headers (do not set running heads) and footers, not even page numbers, because text will be added electronically. For a best viewing experience the used font must be Times New Roman, except on special occasions, such as program code \ref{subsubsec:program_code}. \subsection{Manuscript Setup} \noindent The template is composed by a set of 7 files, in the following 2 groups:\\ \noindent {\bf Group 1.} To format your paper you will need to copy into your working directory, but NOT edit, the following 4 files: \begin{verbatim} - apalike.bst - apalike.sty - article.cls - scitepress.sty \end{verbatim} \noindent {\bf Group 2.} Additionally, you may wish to copy and edit the following 3 example files: \begin{verbatim} - example.bib - example.tex - scitepress.eps \end{verbatim} \subsection{Page Setup} The paper size must be set to A4 (210x297 mm). The document margins must be the following: \begin{itemize} \item Top: 3,3 cm; \item Bottom: 4,2 cm; \item Left: 2,6 cm; \item Right: 2,6 cm. \end{itemize} It is advisable to keep all the given values because any text or material outside the aforementioned margins will not be printed. \subsection{First Section} This section must be in one column. \vfill \subsubsection{Title and Subtitle} Use the command \textit{$\backslash$title} and follow the given structure in "example.tex". The title and subtitle must be with initial letters capitalized (titlecased). 
If no subtitle is required, please remove the corresponding \textit{$\backslash$subtitle} command. In the title or subtitle, words like "is", "or", "then", etc. should not be capitalized unless they are the first word of the subtitle. No formulas or special characters of any form or language are allowed in the title or subtitle. \subsubsection{Authors and Affiliations} Use the command \textit{$\backslash$author} and follow the given structure in "example.tex". \subsubsection{Keywords} Use the command \textit{$\backslash$keywords} and follow the given structure in "example.tex". Each paper must have at least one keyword. If more than one is specified, please use a comma as a separator. The sentence must end with a period. \subsubsection{Abstract} Use the command \textit{$\backslash$abstract} and follow the given structure in "example.tex". Each paper must have an abstract up to 200 words. The sentence must end with a period. \subsection{Second Section} Files "example.tex" and "example.bib" show how to create a paper with a corresponding list of references. This section must be in two columns. Each column must be 7,5-centimeter wide with a column spacing of 0,8-centimeter. The section text must be set to 10-point. Section, subsection and sub-subsection first paragraph should not have the first line indent. To remove the paragraph indentation (only necessary for the sections), use the command \textit{$\backslash$noindent} before the paragraph first word. If you use other style files (.sty) you MUST include them in the final manuscript zip file. \subsubsection{Section Titles} The heading of a section title should be in all-capitals. Example: \textit{$\backslash$section\{FIRST TITLE\}} \vfill \subsubsection{Subsection Titles} The heading of a subsection title must be with initial letters capitalized (titlecased). Words like "is", "or", "then", etc. should not be capitalized unless they are the first word of the subsection title. Example: \textit{$\backslash$subsection\{First Subtitle\}} \subsubsection{Sub-Subsection Titles} The heading of a sub subsection title should be with initial letters capitalized (titlecased). Words like "is", "or", "then", etc should not be capitalized unless they are the first word of the sub subsection title. Example: \textit{$\backslash$subsubsection\{First Subsubtitle\}} \subsubsection{Tables} Tables must appear inside the designated margins or they may span the two columns. Tables in two columns must be positioned at the top or bottom of the page within the given margins. To span a table in two columns please add an asterisk (*) to the table \textit{begin} and \textit{end} command. Example: \textit{$\backslash$begin\{table*\}} \hspace*{1.5cm}\textit{$\backslash$end\{table*\}}\\ Tables should be centered and should always have a caption positioned above it. The font size to use is 9-point. No bold or italic font style should be used. The final sentence of a caption should end with a period. \begin{table}[h] \caption{This caption has one line so it is centered.}\label{tab:example1} \centering \begin{tabular}{|c|c|} \hline Example column 1 & Example column 2 \\ \hline Example text 1 & Example text 2 \\ \hline \end{tabular} \end{table} \begin{table}[h] \caption{This caption has more than one line so it has to be justified.}\label{tab:example2} \centering \begin{tabular}{|c|c|} \hline Example column 1 & Example column 2 \\ \hline Example text 1 & Example text 2 \\ \hline \end{tabular} \end{table} Please note that the word "Table" is spelled out. 
\subsubsection{Figures} Please produce your figures electronically, and integrate them into your document and zip file. Check that in line drawings, lines are not interrupted and have a constant width. Grids and details within the figures must be clearly readable and may not be written one on top of the other. Figure resolution should be at least 300 dpi. Figures must appear inside the designated margins or they may span the two columns. Figures in two columns must be positioned at the top or bottom of the page within the given margins. To span a figure in two columns please add an asterisk (*) to the figure \textit{begin} and \textit{end} command. Example: \textit{$\backslash$begin\{figure*\}} \hspace*{1.5cm}\textit{$\backslash$end\{figure*\}} Figures should be centered and should always have a caption positioned under it. The font size to use is 9-point. No bold or italic font style should be used. \begin{figure}[!h] %\vspace{-0.2cm} \centering {\epsfig{file = SCITEPRESS.eps, width = 5.5cm}} \caption{This caption has one line so it is centered.} \label{fig:example1} \end{figure} \begin{figure}[!h] \vspace{-0.2cm} \centering {\epsfig{file = SCITEPRESS.eps, width = 5.5cm}} \caption{This caption has more than one line so it has to be justified.} \label{fig:example2} \vspace{-0.1cm} \end{figure} The final sentence of a caption should end with a period. Please note that the word "Figure" is spelled out. \subsubsection{Equations} Equations should be placed on a separate line, numbered and centered.\\The numbers accorded to equations should appear in consecutive order inside each section or within the contribution, with the number enclosed in brackets and justified to the right, starting with the number 1. Example: \begin{equation}\label{eq1} a=b+c \end{equation} \subsubsection{Program Code}\label{subsubsec:program_code} Program listing or program commands in text should be set in typewriter form such as Courier New. Example of a Computer Program in Pascal: \begin{small} \begin{verbatim} Begin Writeln('Hello World!!'); End. \end{verbatim} \end{small} The text must be aligned to the left and in 9-point type. \vfill \subsubsection{Reference Text and Citations} References and citations should follow the Harvard (Author, date) System Convention (see the References section in the compiled manuscript). As example you may consider the citation \cite{Smith98}. Besides that, all references should be cited in the text. No numbers with or without brackets should be used to list the references. References should be set to 9-point. Citations should be 10-point font size. You may check the structure of "example.bib" before constructing the references. For more instructions about the references and citations usage please see the appropriate link at the conference website. \section{\uppercase{Copyright Form}} \noindent For the mutual benefit and protection of Authors and Publishers, it is necessary that Authors provide formal written Consent to Publish and Transfer of Copyright before publication of the Book. The signed Consent ensures that the publisher has the Author's authorization to publish the Contribution. The copyright form is located on the authors' reserved area. The form should be completed and signed by one author on behalf of all the other authors. \section{\uppercase{Conclusions}} \label{sec:conclusion} \noindent Please note that ONLY the files required to compile your paper should be submitted. Previous versions or examples MUST be removed from the compilation directory before submission. 
We hope you find the information in this template useful in the preparation of your submission. \section*{\uppercase{Acknowledgements}} \noindent If any, should be placed before the references section without numbering. To do so please use the following command: \textit{$\backslash$section*\{ACKNOWLEDGEMENTS\}} \vfill \bibliographystyle{apalike} {\small \bibliography{example}} \section*{\uppercase{Appendix}} \noindent If any, the appendix should appear directly after the references without numbering, and not on a new page. To do so please use the following command: \textit{$\backslash$section*\{APPENDIX\}} \vfill \end{document}
{ "alphanum_fraction": 0.771917569, "avg_line_length": 31.635359116, "ext": "tex", "hexsha": "14136090682b44729d8c60eba08956b57e5ac656", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-01-19T06:42:03.000Z", "max_forks_repo_forks_event_min_datetime": "2021-01-19T06:42:03.000Z", "max_forks_repo_head_hexsha": "ffaaf7c0097e8fe3600dfbbe7bdd1592a7bf9a66", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "Fenrir12/Master_BCG_EEG", "max_forks_repo_path": "Redaction/Papers/BIOSTEC Template/Example.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "ffaaf7c0097e8fe3600dfbbe7bdd1592a7bf9a66", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "Fenrir12/Master_BCG_EEG", "max_issues_repo_path": "Redaction/Papers/BIOSTEC Template/Example.tex", "max_line_length": 356, "max_stars_count": 2, "max_stars_repo_head_hexsha": "ffaaf7c0097e8fe3600dfbbe7bdd1592a7bf9a66", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "Fenrir12/Master_BCG_EEG", "max_stars_repo_path": "Redaction/Papers/BIOSTEC Template/Example.tex", "max_stars_repo_stars_event_max_datetime": "2021-01-27T14:17:34.000Z", "max_stars_repo_stars_event_min_datetime": "2020-05-24T13:39:10.000Z", "num_tokens": 2904, "size": 11452 }
\documentclass{stdlocal} \begin{document} \section{Mathematical Proofs} % (fold) \label{sec:proofs} \begin{lemma*}[Monte Carlo Integration Estimates Value of Integral] Choose the same setting as in the above definition \ref{definition:monte-carlo-integration}. In this case for all $n\in\setNatural$, the Monte Carlo integration $\mathrm{MCI}_n(f)$ is a Monte Carlo method and the following statements for the expectation value and standard deviation are fulfilled. \[ \expect \mathrm{MCI}_n(f) = \integral{U}{}{f}{λ} \separate \stddev\boxBrackets{\mathrm{MCI}_n(f)} \leq \sqrt{\frac{λ(U)}{n} \integral{U}{}{f^2}{λ}} \] \end{lemma*} \begin{proof}[Lemma \ref{lemma:monte-carlo-integration} on page \pageref{lemma:monte-carlo-integration}] Let $p$ be the probability density of $X_n$. Because the random variables are uniformly distributed on $U$, we can express it as follows. \[ \function{p}{U}{[0,\infty)} \separate p(x) \define \frac{1}{λ(U)} \] By using substitution and chaining from propositions \ref{proposition:substitution} and \ref{proposition:chaining}, the expectation value can be directly computed. \[ \begin{aligned}[t] \expect \mathrm{MCI}_n(f) &= \expect \boxBrackets{ \frac{λ(U)}{n} \sum_{k=1}^n f\circ X_k } = \frac{λ(U)}{n} \sum_{k=1}^n \expect(f\circ X_k) \\ &= λ(U) \integral{U}{}{f(x) p(x)}{λ(x)} = \integral{U}{}{f}{λ} \end{aligned} \] For the standard deviation, first the variance will be observed. Since the sequence of random variables is stochastically independent, the sum can be taken out of the argument. Afterwards, we again apply substitution and chaining. \[ \begin{aligned} \var \mathrm{MCI}_n(f) &= \var\boxBrackets{ \frac{λ(U)}{n} \sum_{k=1}^n f\circ X_k } = \frac{λ(U)^2}{n^2} \sum_{k=1}^n \var\roundBrackets{f\circ X_k} \\ &= \frac{λ(U)^2}{n^2} \sum_{k=1}^n \expect\roundBrackets{f\circ X_k}^2 - \boxBrackets{\expect\roundBrackets{f\circ X_k}}^2 \\ &\leq \frac{λ(U)^2}{n^2} \sum_{k=1}^n \expect\roundBrackets{f\circ X_k}^2 = \frac{λ(U)^2}{n} \integral{U}{}{f^2(x) p(x)}{λ(x)} \\ &= \frac{λ(U)}{n} \integral{U}{}{f^2}{λ} \end{aligned} \] The inequality is now inferred by the definition of the standard deviation which proofs the lemma. \[ \stddev\boxBrackets{\mathrm{MCI}_n(f)} = \sqrt{\var \mathrm{MCI}_n(f)} \leq \sqrt{\frac{λ(U)}{n} \integral{U}{}{f^2}{λ}} \] \end{proof} \begin{lemma*}[Pseudorandom Sequences are Ultimately Periodic] Let $\mathscr{G}\define (S,T,U,G)$ be a PRNG and $s_0\in S$ its initial state. Then the respective pseudorandom sequence $(u_n)_{n\in\setNatural}$ is ultimately periodic. In this case, for the period ρ and the transient τ the following holds. \[ 1 \leq ρ + τ - 1 \leq \# S \] In particular, if $T$ is bijective $(u_n)$ will be periodic. \end{lemma*} \begin{proof}[Lemma~\ref{lemma:pseudorandom-sequences-periodicity} on page \pageref{lemma:pseudorandom-sequences-periodicity}] Let $(s_n)_{n\in\setNatural}$ be the respective sequence of states and $N\define \# S$ the number of different states. $T$ maps all elements of $S$ to at most $N$ other elements of $S$. Therefore at least the element $s_N$ has to be mapped to an element $s_k$ for $k\in\setNatural$ with $k\leq N$ which was already reached. % Hence, there exist $n,k\in\setNatural$ with $k\leq n\leq N$ such that $T(s_n) = s_k$. Hence, we conclude the following. \[ \exists n,k\in\setNatural, k\leq n\leq N: \quad T(s_n) = s_k \] % Assume $T$ maps $s_n$ to a state $s_k$ with $k\in\setNatural$ and $k < n$. We choose $n$ and $k$ appropriately and define the following values. 
\[ ρ \define n - k + 1 \separate τ \define k \] Now let $i\in\setNatural_0$ be arbitrary and apply the definition. We get the following chain of equations which show that $(u_n)$ is ultimately periodic. \[ \begin{aligned} u_{τ+i+ρ} &= u_{n+1+i} = G \circ T^{n+1+i}(s_0) = G \circ T^i\circ T^{n+1}(s_0) \\ &= G \circ T^i(s_k) = G \circ T^i \circ T^k(s_0) = G \circ T^{i+k}(s_0) = u_{k+i} = u_{τ+i} \end{aligned} \] The inequality can be shown by directly inserting the values into the definition. \[ 1 \leq ρ + τ - 1 = n \leq N = \# S \] This proofs the given lemma. \end{proof} \begin{lemma*}[Equidistributed Pseudorandom Sequences] Let $\mathscr{G}\define (S,T,U,G)$ be a PRNG with $s_0\in S$ as its seed value and $(u_n)_{n\in\setNatural}$ the respective pseudorandom sequence with transient τ and period ρ. Furthermore, let μ be a probability measure on $(U,\mathscr{P}(U))$. Then the following statements are equivalent. \begin{enumerate}[label=(\roman*)] \item $(u_n)$ is equidistributed with respect to μ. \item For all $u\in U$ the following is true. \[ \frac{1}{ρ} \cdot \#\set{n\in\setNatural}{τ\leq n < ρ+τ, u_n = u} = μ(\set{u}{}) \] \end{enumerate} \end{lemma*} \begin{proof}[Lemma~\ref{lemma:equidistribution} on page \pageref{lemma:equidistribution}] Because $U$ is a finite set, every measurable function $\function{X}{U}{\setReal}$ can be described as a linear combination of characteristic functions with respect to some real coefficients $α_u$ for all $u \in U$ in the following way. \[ X = \sum_{u\in U} α_u \mathds{1}_{\set{u}{}} \] Hence, without loss of generality, it suffices to take only characteristic functions into account. Let $u\in U$ be arbitrary. The right-hand side of the definition will then result in the following. \[ \integral{U}{}{\mathds{1}_{\set{u}{}}}{μ} = μ(\set{u}{}) \] Applying the characteristic function together with the properties of a periodic sequence to the left-hand side of the definition, looks as follows. \[ \begin{aligned} \lim_{n\to\infty} \frac{1}{n} \sum_{k=1}^n \mathds{1}_{\set{u}{}}(u_k) &= \lim_{n\to\infty} \frac{1}{n} \sum_{k=1}^{τ-1} \mathds{1}_{\set{u}{}}(u_k) + \lim_{n\to\infty} \frac{1}{n}\sum_{k=τ}^{τ+n-1} \mathds{1}_{\set{u}{}}(u_k) \\ &= \frac{1}{ρ} \sum_{k=τ}^{τ+ρ-1} \mathds{1}_{\set{u}{}}(u_k) \\ &= \frac{1}{ρ} \cdot \#\set{n\in\setNatural}{τ\leq n < ρ+τ, u_n = u} \end{aligned} \] This shows the desired equivalence and proofs the lemma. \end{proof} \begin{lemma*}[Corresponding Vector Sequences are Ultimately Periodic] Let $U$ be a non-empty set of values and $(u_n)_{n\in\setNatural}$ be an ultimately periodic sequence in $U$ with period ρ and transient τ. In this case, every corresponding $k$-dimensional vector sequence $(v_n)_{n\in\setNatural}$ with translation $t$ is ultimately periodic with period $ρ'$ and transient $τ'$ defined as follows. \[ ρ' \define \frac{ρ}{\mathrm{gcd}(ρ,k)} \separate τ' \define \ceilBrackets{\frac{\max(0,τ-1-t)}{k}} + 1 \] \end{lemma*} \begin{proof}[Lemma \ref{lemma:vector-sequences-periodicity} on page \pageref{lemma:vector-sequences-periodicity}] Choose $n\in\setNatural_0$ and $i\in\setNatural$ with $i\leq k$ to be arbitrary. We denote with $v^{(i)}_n$ the $i$.~coordinate of the $n$.~vector. By definition the following equality holds. \[ v^{(i)}_{τ' + n + ρ'} = u_{t + (τ'+n+ρ'-1)k + i} \] Observing the index, we separate it into three parts. One for the index, one for the transient one for the period. 
\[ t+(τ' + n + ρ' - 1)k + i = \underbrace{(t + τ'k - k + 1)}_{\reverseDefine \tilde{τ}} + \underbrace{(nk + i - 1)}_{\reverseDefine \tilde{n}} + \underbrace{ρ'k}_{\reverseDefine \tilde{ρ}} \] The period part has to be a multiple of the period ρ of $(u_n)$ as can be seen in the following. Hence, $\tilde{ρ}$ has the property of a period. \[ \tilde{ρ} = ρ'k = \frac{ρk}{\mathrm{gcd}(ρ,k)} = ρ \frac{k}{\mathrm{gcd}(ρ,k)} \] To apply the periodicity of $(u_n)$, the transient part has to be bigger or equal to the transient τ of $(u_n)$. \[ \tilde{τ} = t + τ'k - k + 1 = 1 + t + k \ceilBrackets{\frac{\max(0,τ-1-t)}{k}} \geq τ \] Inserting the results and applying the periodicity of $(u_n)$, we can conclude that the corresponding vector sequence has to be ultimately periodic as well. \[ v^{(i)}_{τ' + n + ρ'} = u_{\tilde{τ} + \tilde{n} + \tilde{ρ}} = u_{\tilde{τ} + \tilde{n}} = u_{t + (τ' + n - 1)k + i} = v^{(i)}_{τ' + n} \] Due to the shown statements, $ρ'$ and $τ'$ are indeed the smallest possible values such that this equation holds and can therefore be denoted as period and transient of $(v_n)$ respectively. \end{proof} % section proofs (end) \end{document}
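As a numerical illustration of the Monte Carlo integration lemma proved above (a sketch for exposition only, not code belonging to this thesis), the following Python fragment estimates $\int_0^1 x^2 \,\mathrm{d}λ = 1/3$ on $U = [0,1]$. Here $λ(U) = 1$ and $\int_U f^2 \,\mathrm{d}λ = 1/5$, so the lemma bounds the standard deviation of the estimator by $\sqrt{1/(5n)}$.
\begin{verbatim}
import math
import random

def monte_carlo_integrate(f, a, b, n, seed=42):
    rng = random.Random(seed)
    measure = b - a                                  # lambda(U) for an interval
    total = sum(f(rng.uniform(a, b)) for _ in range(n))
    return measure * total / n                       # MCI_n(f)

n = 100_000
estimate = monte_carlo_integrate(lambda x: x * x, 0.0, 1.0, n)
bound = math.sqrt(1.0 / (5.0 * n))                   # sqrt(lambda(U)/n * integral of f^2)
print(estimate, abs(estimate - 1.0 / 3.0), bound)
\end{verbatim}
For large $n$ the absolute error is typically of the same order of magnitude as this standard-deviation bound, reflecting the $1/\sqrt{n}$ behaviour of the estimate.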
{ "alphanum_fraction": 0.6303338993, "avg_line_length": 54.8757763975, "ext": "tex", "hexsha": "d385f0f0e44ef617feb48bf864eb4849fc47d8d3", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "c78931c1a5c0a85a1ad36d7d8979567b0853be52", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "lyrahgames/random-number-generators", "max_forks_repo_path": "docs/thesis/sections/proofs.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "c78931c1a5c0a85a1ad36d7d8979567b0853be52", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "lyrahgames/random-number-generators", "max_issues_repo_path": "docs/thesis/sections/proofs.tex", "max_line_length": 240, "max_stars_count": 4, "max_stars_repo_head_hexsha": "c78931c1a5c0a85a1ad36d7d8979567b0853be52", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "lyrahgames/random-number-generators", "max_stars_repo_path": "docs/thesis/sections/proofs.tex", "max_stars_repo_stars_event_max_datetime": "2021-04-20T00:07:23.000Z", "max_stars_repo_stars_event_min_datetime": "2020-03-28T15:12:07.000Z", "num_tokens": 3001, "size": 8835 }
% a4paper = ISO 216 standard. % Don't forget to revert to default if you want the US Letter instead. \documentclass[a4paper,11pt,titlepage,openany, leqno]{book} %: MATH PACKAGE USED %\usepackage{graphicx} \usepackage{amsmath, physics, amssymb, mathrsfs, amsthm, mathspec, fouridx, %stix %tensor } %\usepackage{tlatex} % TEXs LOGOS! %\usepackage{dtklogos} %\usepackage[bottom=3cm,top=2cm]{geometry} % Metric system, sorry;) \usepackage[left=3cm,top=3cm,right=3cm,bottom=3cm]{geometry} %: COLOR \usepackage{array} %\usepackage[dvipsnames]{xcolor} %\usepackage[]{color} %\usepackage{framed} \usepackage{colortbl} %: FOR XELATEX. SKIP IF ERROR MESSAGE \usepackage{xltxtra,xunicode} %: for HEVEA- Uncomment before using XeTeX %\usepackage[utf8]{inputenc} \usepackage[xetex]{hyperref} %\usepackage{hyperref} %\usepackage{listings} \hypersetup{% pdfborder = {0 0 0}, colorlinks, citecolor=red, filecolor=green, linkcolor=black, urlcolor=blue%cyan!50!black!90 } \defaultfontfeatures{Mapping=tex-text} % FONTS: PERSONNAL CHOICE- CAN BE CHANGED! \newfontfamily{\CMUCS}{CMU Classical Serif} \newfontfamily{\CMU}{CMU Serif} \newfontfamily{\CMUSS}{CMU Sans Serif} \newfontfamily{\fw}{CMU Typewriter Text Light} %\newfontfamily{\Arabic}{Al Bayan} % On mac OS: %\newfontfamily{\Didot}{Didot} % Alternatively, for all Unix systems: %\newfontfamily{\Didot}{Theano Didot} \def\mainfont{CMU Serif} \setmainfont{\mainfont} \setmathfont(Latin)[Uppercase=Regular,Lowercase=Regular]{\mainfont} \setmathfont(Greek)[Uppercase=Regular,Lowercase=Regular]{\mainfont} \setmathrm{\mainfont} \setmathbb{\mainfont} %\setmathit{\mainfont} \setmathtt{CMU Typewriter Text Light} %\setmathbf{CMU Serif} %: TYPOGRAPHIC CONVENTIONS, XETEX \usepackage{polyglossia} %\selectbackgroundlanguage[variant=usmax]{english} \setdefaultlanguage[variant=usmax]{english} %: ----------------------------PRIMITIVES------------------------------------- \newcommand{\insmall}[1]{\text{\small{#1}}} % Some acceptable minus sign. \def\minus{\insmall{-}} % The field C and the usual subsets R, Q, Z, N. \newcommand\usualSet[1]{{\mathbf #1}} \def\C{\usualSet{C}} \def\R{\usualSet{R}} \def\Q{\usualSet{Q}} \def\Z{\usualSet{Z}} \def\N{\usualSet{N}} % "Defined as equals to", alternative to := . \def\Def{\triangleq} % The usual scalar field = C, most of the time. 
\def\field{\C} % Counting numbers \newcommand\counting[1]{#1=1, 2, 3, \dots} % integers (nonnegative) \newcommand\integers[1]{#1=0, 1, 2, \dots} \newcommand{\D}{{\mathscr D}} % Indices: upper, lower \newcommand{\up}[1]{^{(#1)}} \newcommand{\low}[1]{_{#1}} \newcommand{\upnw}[2]{\fourIdx{#2}{}{}{}#1} \newcommand{\downsw}[2]{\fourIdx{}{#2}{}{}#1} \newcommand{\scriptatleft}[3]{\fourIdx{#2}{#3}{}{}#1} \newcommand{\diagscript}[3]{\fourIdx{#2}{}{}{#3}#1} %\tensor*[^x]{V}{_k} % function, relation \newcommand{\function}[1]{\mathtt{#1}} \newcommand{\relation}[2]{{#1}_{#2}} \newcommand{\f}[2]{#1(#2)} \DeclareMathOperator\supp{supp} % Sets %\renewcommand{\notin}{\tiny{\not\in}} \def\contains{\supset} \def\cuts{\cap} \DeclareMathOperator\cvxhull{co} \newcommand{\co}[1]{\cvxhull(#1)} \newcommand{\set}[2]{\{#1: #2\}} \newcommand{\singleton}[1]{\{#1\}} \newcommand{\interior}[1]{\overset{\circ}{#1}} \newcommand{\closure}[1]{\overline{#1}} % Arithmetics \newcommand{\ceil}[1]{\lceil #1 \rceilf} % Analysis \newcommand\magnitude[1]{\left\lvert\, #1 \,\right\rvert} \renewcommand{\norm}[2]{\| \,#2 \, \|_{#1}} \def\weakstar{\text{weak}^\ast\text{-}} % Topology \newcommand{\localbase}[1]{\mathscr #1} %Iverson bracket \newcommand{\boolean}[1]{\left[\,#1\,\right]} % limits \newcommand{\tendsto}[2]{\underset{#1\to #2}{\longrightarrow}} % Variables \newcommand{\varit}[1]{\mathit{#1}} \def\vart{\varit{t}} %Usual terms \def\ie{\textit{i.e.} } \def\eg{\textit{e.g.} } \def\cf{\textit{cf.\,}} \def\iif{{\bf iff} } \def\wlg{{\bf wlg }} % Without loss og generality \def\then{\Rightarrow} \def\therefore{\Rightarrow} \def\since{\Leftarrow} % Citations \newcommand{\citehere}[2]{\overset{(#1)}{#2}} \newcommand{\citeq}[1]{\citehere{#1}{=}} %\newcomand{\citeineq}[1]{\citehere{#1}{=}} \newcommand{\citeleq}[1]{\citehere{#1}{\leq}} \newcommand{\citegeq}[1]{\citehere{#1}{\geq}} \newcommand{\citeleast}[1]{\citehere{#1}{<}} \newcommand{\citegreater}[1]{\citehere{#1}{>}} \newcommand{\citesubset}[1]{\citehere{#1}{\subset}} \newcommand{\citesupset}[1]{\citehere{#1}{\supset}} \newcommand{\citethen}[1]{\citehere{#1}{\Rightarrow}} %\newcommand{\citesince}[1]{\citehere{#1}{\Leftarrow}} \newcommand{\citeresult}[2]{#1 of #2} \newcommand{\citeresultFA}[1]{\citeresult{#1}{\cite{FA}}} \newcommand{\citin}[1]{\citehere{#1}{\in}} % Misc \newcommand{\underbarwithindex}[2]{\underline{#1}\,\!_{#2}} \newcommand{\dy}[1]{{\function{dyadic}}(#1)} \def\ddy{{\function{decay}}} %--------------END OF PRIMITIVES-------------------------------- \def\ROOT{./} \def\TITLE{ Solutions to some exercises from Walter Rudin's \textit{Functional Analysis} } \def\EMAIL{} \def\AUTHOR{gitcordier} \begin{document} %\begin{abstract} % \input{\ROOT/abstract.tex} %\end{abstract} \title{\TITLE} \author{\AUTHOR} \date{\today} \maketitle % FORMAT ENUMERATION (DEFAULT. 
OPTIONS: ALPH, ARABIC, ROMAN,…) \renewcommand{\labelenumi}{$(\textit{\alph{enumi}}\,)$} % IF LANG = @fr %\renewcommand{\chaptername}{Chapitre} % % CHAPTER NAME : \frontmatter \tableofcontents \setcounter{chapter}{1} \input{\ROOT/notations.tex} \mainmatter %\part{Content} \setcounter{chapter}{0} \input{\ROOT/FA_mainmatter.tex} \backmatter %\part{Annex} %\renewcommand\thechapter{\Alph{chapter}} %\setcounter{chapter}{1} %\chapter{Additional results} %\newcounter{annex} %\setcounter{annex}{1} %\renewcommand\thesection{\thechapter.\arabic{annex}} %\section{Number theory} %\input{\ROOT/Annex_number_theory.tex} \bibliographystyle{plain} \bibliography{bibliography}{} \addcontentsline{toc}{chapter}{Bibliography} \end{document} %Made on \XeTeX
{ "alphanum_fraction": 0.6862481064, "avg_line_length": 26.4044444444, "ext": "tex", "hexsha": "f1bd9dda37b9fce1e5917e92dbaef7efde731463", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "4d54af9cab1ce2bf512341cc1f2a0c81d7097754", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "gitcordier/FunctionalAnalysis", "max_forks_repo_path": "FA_DM.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "4d54af9cab1ce2bf512341cc1f2a0c81d7097754", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "gitcordier/FunctionalAnalysis", "max_issues_repo_path": "FA_DM.tex", "max_line_length": 79, "max_stars_count": null, "max_stars_repo_head_hexsha": "4d54af9cab1ce2bf512341cc1f2a0c81d7097754", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "gitcordier/FunctionalAnalysis", "max_stars_repo_path": "FA_DM.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2148, "size": 5941 }
% !TeX spellcheck = en_US % !TeX encoding = UTF-8 \documentclass{beamer} \mode<presentation> { \usetheme{Madrid} } \usepackage{graphicx, graphics} \usepackage{apacite} \usepackage[style=iso]{datetime2} \DeclareGraphicsExtensions{.pdf, .png, .jpg, .gif} \title[Cavity]{Helixco Cavity} \author{Jaewoong Lee} \institute[UNIST] { Ulsan National Institute of Science and Technology \medskip \newline \textit{[email protected]} } \date{\today} \begin{document} \begin{frame} \titlepage \end{frame} \begin{frame} \frametitle{Overview} \tableofcontents \end{frame} \section{Methods} \begin{frame} \frametitle{t-SNE} \begin{figure}[h!] \includegraphics[width=0.6 \linewidth]{figures/mnist.png} \caption{Visualizations of handwritten digits from the MNIST data set \protect \cite{tsne1}} \end{figure} \end{frame} \begin{frame} \frametitle{Programming Methods} \begin{itemize} \item Docker \cite{docker1} \item QIIME 2 \item Scikit-learn \cite{sklearn1, sklearn2} \end{itemize} \end{frame} \begin{frame} \frametitle{QIIME 2 Workflow} \begin{figure}[h!] \includegraphics[width=0.8 \linewidth]{figures/workflow.png} \caption{QIIME 2 Workflow} \end{figure} \end{frame} \section{Proceedings} \begin{frame}[allowframebreaks] \frametitle{Yields} \begin{itemize} \item t-SNE with every bacterium \end{itemize} \end{frame} \begin{frame} \frametitle{t-SNE with every bacterium} \begin{figure} $\begin{array}{cc} \includegraphics[width=0.4 \linewidth]{figures/step14/NC.png} & \includegraphics[width=0.4 \linewidth]{figures/step14/SP.png} \\ \mbox{(a) Normal vs. Cavity} & \mbox{(b) Saliva vs. Plaque} \end{array}$ \end{figure} $\therefore$ We need to select bacteria with feature importance. \end{frame} \begin{frame}[allowframebreaks] \frametitle{Requirements} \begin{figure}[h!] \includegraphics[width=0.5 \linewidth]{figures/time.png} \end{figure} \end{frame} \begin{frame}[allowframebreaks] \frametitle{Expectations} \begin{itemize} \item Improved classification \end{itemize} \end{frame} \begin{frame}[allowframebreaks] \frametitle{References} \bibliographystyle{apacite} \bibliography{reference} \end{frame} \end{document}
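The feature-selection step suggested in the slides (``we need to select bacteria with feature importance'') followed by a new t-SNE embedding could look roughly like the scikit-learn sketch below. The choice of a random forest for the importance ranking, the variable names, and all parameter values are illustrative assumptions and are not taken from the actual analysis pipeline; in the real workflow the abundance matrix would come from the QIIME 2 feature table rather than from random data.
\begin{verbatim}
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.manifold import TSNE

def tsne_with_important_taxa(abundance, labels, keep=50, random_state=0):
    """abundance: (samples x taxa) matrix; labels: e.g. normal vs. cavity."""
    forest = RandomForestClassifier(n_estimators=500, random_state=random_state)
    forest.fit(abundance, labels)
    top = np.argsort(forest.feature_importances_)[::-1][:keep]  # best taxa first
    embedding = TSNE(n_components=2, perplexity=30,
                     random_state=random_state).fit_transform(abundance[:, top])
    return top, embedding

# Example with random data standing in for the real abundance table.
rng = np.random.default_rng(0)
X = rng.random((60, 200))
y = rng.integers(0, 2, 60)
taxa, embedding = tsne_with_important_taxa(X, y, keep=20)
print(embedding.shape)                                          # (60, 2)
\end{verbatim}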
{ "alphanum_fraction": 0.6029962547, "avg_line_length": 24.495412844, "ext": "tex", "hexsha": "63394df50ad5ad448a846426a4a3e51dc8c58d5b", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "0c8194dbd142f97c6027eec3337472a5e248bc08", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "CompbioLabUnist/Helixco_Cavity", "max_forks_repo_path": "jwlee230/Report/presentation.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "0c8194dbd142f97c6027eec3337472a5e248bc08", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "CompbioLabUnist/Helixco_Cavity", "max_issues_repo_path": "jwlee230/Report/presentation.tex", "max_line_length": 104, "max_stars_count": null, "max_stars_repo_head_hexsha": "0c8194dbd142f97c6027eec3337472a5e248bc08", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "CompbioLabUnist/Helixco_Cavity", "max_stars_repo_path": "jwlee230/Report/presentation.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 776, "size": 2670 }
\documentstyle[11pt,reduce]{article} \title{{\tt TRIGSIMP}\\ A REDUCE Package for the Simplification and Factorization of Trigonometric and Hyperbolic Functions} \date{} \author{Wolfram Koepf\\ Andreas Bernig\\ Herbert Melenk\\ ZIB Berlin \\ email: {\tt [email protected]}} \begin{document} \maketitle \section{Introduction} The REDUCE package TRIGSIMP is a useful tool for all kinds of trigonometric and hyperbolic simplification and factorization. There are three procedures included in TRIGSIMP: trigsimp, trigfactorize and triggcd. The first is for finding simplifications of trigonometric or hyperbolic expressions with many options, the second for factorizing them and the third for finding the greatest common divisor of two trigonometric or hyperbolic polynomials. To start the package it must be loaded by: {\small \begin{verbatim} 1: load trigsimp; \end{verbatim} }\noindent \section{\REDUCE{} operator {\tt trigsimp}} As there is no normal form for trigonometric and hyperbolic functions, the same function can convert in many different directions, e.g. $\sin(2x) \leftrightarrow 2\sin(x)\cos(x)$. The user has the possibility to give several parameters to the procedure {\tt trigsimp} in order to influence the direction of transformations. The decision whether a rational expression in trigonometric and hyperbolic functions vanishes or not is possible. To simplify a function {\tt f}, one uses {\tt trigsimp(f[,options])}. Example: {\small \begin{verbatim} 2: trigsimp(sin(x)^2+cos(x)^2); 1 \end{verbatim} }\noindent Possible options are (* denotes the default): \begin{enumerate} \item {\tt sin} (*) or {\tt cos} \item {\tt sinh} (*) or {\tt cosh} \item {\tt expand} (*) or {\tt combine} or {\tt compact} \item {\tt hyp} or {\tt trig} or {\tt expon} \item {\tt keepalltrig} \end{enumerate} From each group one can use at most one option, otherwise an error message will occur. The first group fixes the preference used while transforming a trigonometric expression: {\small \begin{verbatim} 3: trigsimp(sin(x)^2); 2 sin(x) 4: trigsimp(sin(x)^2,cos); 2 - cos(x) + 1 \end{verbatim} }\noindent The second group is the equivalent for the hyperbolic functions. The third group determines the type of transformations. With the default {\tt expand}, an expression is written in a form only using single arguments and no sums of arguments: {\small \begin{verbatim} 5: trigsimp(sin(2x+y)); 2 2*cos(x)*cos(y)*sin(x) - 2*sin(x) *sin(y) + sin(y) \end{verbatim} }\noindent With {\tt combine}, products of trigonometric functions are transformed to trigonometric functions involving sums of arguments: {\small \begin{verbatim} 6: trigsimp(sin(x)*cos(y),combine); sin(x - y) + sin(x + y) ------------------------- 2 \end{verbatim} }\noindent With {\tt compact}, the REDUCE operator {\tt compact} \cite{hearns} is applied to {\tt f}. This leads often to a simple form, but in contrast to {\tt expand} one doesn't get a normal form. Example for {\tt compact}: {\small \begin{verbatim} 7: trigsimp((1-sin(x)**2)**20*(1-cos(x)**2)**20,compact); 40 40 cos(x) *sin(x) \end{verbatim} }\noindent With the fourth group each expression is transformed to a trigonometric, hyperbolic or exponential form: {\small \begin{verbatim} 8: trigsimp(sin(x),hyp); - sinh(i*x)*i 9: trigsimp(sinh(x),expon); 2*x e - 1 ---------- x 2*e 10: trigsimp(e^x,trig); x x cos(---) + sin(---)*i i i \end{verbatim} }\noindent Usually, {\tt tan}, {\tt cot}, {\tt sec}, {\tt csc} are expressed in terms of {\tt sin} and {\tt cos}. 
It can be sometimes useful to avoid this, which is handled by the option {\tt keepalltrig}: {\small \begin{verbatim} 11: trigsimp(tan(x+y),keepalltrig); - (tan(x) + tan(y)) ---------------------- tan(x)*tan(y) - 1 \end{verbatim} }\noindent It is possible to use the options of different groups simultaneously: {\small \begin{verbatim} 12: trigsimp(sin(x)**4,cos,combine); cos(4*x) - 4*cos(2*x) + 3 --------------------------- 8 \end{verbatim} }\noindent Sometimes, it is necessary to handle an expression in different steps: {\small \begin{verbatim} 13: trigsimp((sinh(x)+cosh(x))**n+(cosh(x)-sinh(x))**n,expon); 2*n*x e + 1 ------------ n*x e 14: trigsimp(ws,hyp); 2*cosh(n*x) 15: trigsimp((cosh(a*n)*sinh(a)*sinh(p)+cosh(a)*sinh(a*n)*sinh(p)+ sinh(a - p)*sinh(a*n))/sinh(a)); cosh(a*n)*sinh(p) + cosh(p)*sinh(a*n) 16: trigsimp(ws,combine); sinh(a*n + p) \end{verbatim} }\noindent \section{\REDUCE{} operator {\tt trigfactorize}} With {\tt trigfactorize(p,x)} one can factorize the trigonometric or hyperbolic polynomial {\tt p} with respect to the argument x. Example: {\small \begin{verbatim} 17: trigfactorize(sin(x),x/2); x x {2,cos(---),sin(---)} 2 2 \end{verbatim} }\noindent If the polynomial is not coordinated or balanced \cite{art}, the output will equal the input. In this case, changing the value for x can help to find a factorization: {\small \begin{verbatim} 18: trigfactorize(1+cos(x),x); {cos(x) + 1} 19: trigfactorize(1+cos(x),x/2); x x {2,cos(---),cos(---)} 2 2 \end{verbatim} }\noindent The polynomial can consist of both trigonometric and hyperbolic functions: {\small \begin{verbatim} 20: trigfactorize(sin(2x)*sinh(2x),x); {4, cos(x), sin(x), cosh(x), sinh(x)} \end{verbatim} }\noindent \section{\REDUCE{} operator {\tt triggcd}} The operator {\tt triggcd} is an application of {\tt trigfactorize}. With its help the user can find the greatest common divisor of two trigonometric or hyperbolic polynomials. It uses the method described in \cite{art}. The syntax is: {\tt triggcd(p,q,x)}, where p and q are the polynomials and x is the smallest unit to use. 
Example: {\small \begin{verbatim} 21: triggcd(sin(x),1+cos(x),x/2); x cos(---) 2 22: triggcd(sin(x),1+cos(x),x); 1 \end{verbatim} }\noindent The polynomials p and q can consist of both trigonometric and hyperbolic functions: {\small \begin{verbatim} 23: triggcd(sin(2x)*sinh(2x),(1-cos(2x))*(1+cosh(2x)),x); cosh(x)*sin(x) \end{verbatim} }\noindent \section{Further Examples} With the help of the package the user can create identities: {\small \begin{verbatim} 24: trigsimp(tan(x)*tan(y)); sin(x)*sin(y) --------------- cos(x)*cos(y) 25: trigsimp(ws,combine); cos(x - y) - cos(x + y) ------------------------- cos(x - y) + cos(x + y) 26: trigsimp((sin(x-a)+sin(x+a))/(cos(x-a)+cos(x+a))); sin(x) -------- cos(x) 27: trigsimp(cosh(n*acosh(x))-cos(n*acos(x)),trig); 0 28: trigsimp(sec(a-b),keepalltrig); csc(a)*csc(b)*sec(a)*sec(b) ------------------------------- csc(a)*csc(b) + sec(a)*sec(b) 29: trigsimp(tan(a+b),keepalltrig); - (tan(a) + tan(b)) ---------------------- tan(a)*tan(b) - 1 30: trigsimp(ws,keepalltrig,combine); tan(a + b) \end{verbatim} }\noindent Some difficult expressions can be simplified: {\small \begin{verbatim} 31: df(sqrt(1+cos(x)),x,4); 4 2 2 2 (sqrt(cos(x) + 1)*( - 4*cos(x) - 20*cos(x) *sin(x) + 12*cos(x) 2 4 2 - 4*cos(x)*sin(x) + 8*cos(x) - 15*sin(x) + 16*sin(x) ))/(16 4 3 2 *(cos(x) + 4*cos(x) + 6*cos(x) + 4*cos(x) + 1)) 32: trigsimp(ws); sqrt(cos(x) + 1) ------------------ 16 33: load taylor; 34: taylor(sin(x+a)*cos(x+b),x,0,4); cos(b)*sin(a) + (cos(a)*cos(b) - sin(a)*sin(b))*x 2 - (cos(a)*sin(b) + cos(b)*sin(a))*x 2*( - cos(a)*cos(b) + sin(a)*sin(b)) 3 + --------------------------------------*x 3 cos(a)*sin(b) + cos(b)*sin(a) 4 5 + -------------------------------*x + O(x ) 3 35: trigsimp(ws,combine); sin(a - b) + sin(a + b) 2 2*cos(a + b) 3 ------------------------- + cos(a + b)*x - sin(a + b)*x - --------------*x 2 3 sin(a + b) 4 5 + ------------*x + O(x ) 3 \end{verbatim} }\noindent Certain integrals whose calculation was not possible in REDUCE (without preprocessing), are now computable: {\small \begin{verbatim} 36: int(trigsimp(sin(x+y)*cos(x-y)*tan(x)),x); 2 2 cos(x) *x - cos(x)*sin(x) - 2*cos(y)*log(cos(x))*sin(y) + sin(x) *x --------------------------------------------------------------------- 2 37: int(trigsimp(sin(x+y)*cos(x-y)/tan(x)),x); x 2 (cos(x)*sin(x) - 2*cos(y)*log(tan(---) + 1)*sin(y) 2 x + 2*cos(y)*log(tan(---))*sin(y) + x)/2 2 \end{verbatim} }\noindent Without the package, the integration fails, in the second case one doesn't receive an answer for many hours. {\small \begin{verbatim} 38: trigfactorize(sin(2x)*cos(y)**2,y/2); {2*cos(x)*sin(x), y y cos(---) + sin(---), 2 2 y y cos(---) + sin(---), 2 2 y y cos(---) - sin(---), 2 2 y y cos(---) - sin(---)} 2 2 39: trigfactorize(sin(y)**4-x**2,y); 2 2 { - sin(y) + x, - (sin(y) + x)} 40: trigfactorize(sin(x)*sinh(x),x/2); x x x x {4,cos(---),sin(---),cosh(---),sinh(---)} 2 2 2 2 41: triggcd(-5+cos(2x)-6sin(x),-7+cos(2x)-8sin(x),x/2); x x 2*cos(---)*sin(---) + 1 2 2 42: triggcd(1-2cosh(x)+cosh(2x),1+2cosh(x)+cosh(2x),x/2); x 2 2*sinh(---) + 1 2 \end{verbatim} } \begin{thebibliography}{99} \bibitem{art} Roach, Kelly: Difficulties with Trigonometrics. Notes of a talk. \bibitem{hearns} Hearn, A.C.: COMPACT User Manual. \end{thebibliography} \end{document}
{ "alphanum_fraction": 0.5682377251, "avg_line_length": 23.1503416856, "ext": "tex", "hexsha": "53e25803c42c00592d2fdac88b7848fb5056161b", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "5e8fef0cc7999fa8ab75d8fdf79ad5488047282b", "max_forks_repo_licenses": [ "BSD-2-Clause" ], "max_forks_repo_name": "arthurcnorman/general", "max_forks_repo_path": "packages/trigsimp/otrgsimp.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "5e8fef0cc7999fa8ab75d8fdf79ad5488047282b", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-2-Clause" ], "max_issues_repo_name": "arthurcnorman/general", "max_issues_repo_path": "packages/trigsimp/otrgsimp.tex", "max_line_length": 81, "max_stars_count": null, "max_stars_repo_head_hexsha": "5e8fef0cc7999fa8ab75d8fdf79ad5488047282b", "max_stars_repo_licenses": [ "BSD-2-Clause" ], "max_stars_repo_name": "arthurcnorman/general", "max_stars_repo_path": "packages/trigsimp/otrgsimp.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 3174, "size": 10163 }
\subsection{A* search} If the heuristic is admissible, then A* is optimal. Intuitively, an admissible heuristic never overestimates the remaining cost, so A* cannot close the goal via a suboptimal path before the optimal one is found. Admissible? For all nodes \(n\), \(h(n)\le h^*(n)\), where \(h^*(n)\) is the true cost from \(n\) to the goal. A* expands nodes in order of \(f(n)=g(n)+h(n)\), where \(g(n)\) is the cost of the path from the start node to \(n\) and \(h(n)\) is the heuristic estimate of the remaining cost from \(n\) to the goal. Informed: Yes Time: Exponential Space: Exponential; all generated nodes are kept in memory Complete: Yes Optimal: Yes, if the heuristic is admissible
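As an illustration, a minimal Python sketch of A* is shown below, assuming the graph is given as a dictionary mapping each node to a list of \((\text{neighbour}, \text{edge cost})\) pairs and that \texttt{h} is an admissible heuristic function:

\begin{verbatim}
import heapq

def a_star(graph, start, goal, h):
    """A* search. graph: dict node -> [(neighbour, edge_cost), ...].
    h(n) must not overestimate the true cost from n to the goal."""
    open_heap = [(h(start), 0, start, [start])]   # entries: (f, g, node, path)
    best_g = {start: 0}
    while open_heap:
        f, g, node, path = heapq.heappop(open_heap)
        if node == goal:
            return path, g                        # optimal when h is admissible
        if g > best_g.get(node, float("inf")):
            continue                              # stale heap entry, skip it
        for neighbour, cost in graph.get(node, []):
            g_new = g + cost
            if g_new < best_g.get(neighbour, float("inf")):
                best_g[neighbour] = g_new
                f_new = g_new + h(neighbour)
                heapq.heappush(open_heap,
                               (f_new, g_new, neighbour, path + [neighbour]))
    return None, float("inf")                     # goal unreachable
\end{verbatim}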
{ "alphanum_fraction": 0.7149321267, "avg_line_length": 20.0909090909, "ext": "tex", "hexsha": "6ce6fda8ea482e79002e87bcf5fcfacd3db19dfa", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "adamdboult/nodeHomePage", "max_forks_repo_path": "src/pug/theory/computer/treeHeuristic/01-02-A.tex", "max_issues_count": 6, "max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "adamdboult/nodeHomePage", "max_issues_repo_path": "src/pug/theory/computer/treeHeuristic/01-02-A.tex", "max_line_length": 132, "max_stars_count": null, "max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "adamdboult/nodeHomePage", "max_stars_repo_path": "src/pug/theory/computer/treeHeuristic/01-02-A.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 125, "size": 442 }
\chapter{Introduction} \label{cpt:Introduction} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{The Sanzhi community and the Sanzhi language} \label{sec:The Sanzhi community and the Sanzhi language} Sanzhi Dargwa is an East Caucasian (i.e. Nakh-Dagestanian) language from the Dargwa (or Dargi) subbranch and belongs to the South Dargwa varieties (Glottocode: sanz1248). In the literature, there is no unique terminology referring to Dargwa languages, dialects or peoples, but several terms exist: Dargwa, Dargva, Dargi, or Darginskiy. For reasons of uniformity and unambiguousness I restrict myself to the label and the graphic representation \textit{Dargwa} and will not use the other terms. Sanzhi Dargwa is spoken by approximately 250 speakers and is critically endangered. The self-designation of the Sanzhi people is \tit{sunglan-te} (Sanzhi.person\tsc{-pl}) and the language is called \tit{sunglan ʁaj} (lit. Sanzhi.person language). More than 40 years ago, all Sanzhi speakers left the village of Sanzhi, their village of origin, in the Caucasian Mountains. Sanzhi is located in the Dakhadayevskiy rayon in central Dagestan (today part of the Russian Federation), which is predominantly inhabited by speakers of Dargwa languages. The village of Sanzhi is located on the sunny side of the Ulluchay river valley, at an altitude of about 1,500 meters (\reffig{fig:Map 2}). The closest neighboring villages are Itsari, Shari, Khuduts, Ashty, and Amukh. The distance from Makhachkala is around 200 kilometers, from the regional center of the Dakhadayevskiy rayon, Urkarakh, it is 66 kilometers, and from Derbent around 150 kilometers. There is no direct road to Sanzhi. In order to reach the village, people go to Itsari by car or minibus and then walk around six kilometers until they reach Sanzhi. Currently, the Sanzhi territory is part of the nature park Itsari. The village consists of approximately 30 houses, which are in very poor condition and not inhabited anymore (Figures~\ref{fig:Sanzhi 1}--\ref{fig:Sanzhi 3}). The only house with a roof that is relatively well kept is the former school building. Sanzhi people regularly go to Sanzhi in the summer to spend a few days fishing, berry picking, and doing other activities in their former village. The village is surrounded by terrace fields that have been used for centuries to grow crops such as rye, wheat, barley, oats, and in the recent past also carrots, radishes, potatoes, and others. The traditional occupations of the Sanzhi people were farming and breeding, in particular sheep breeding. Not far from the village, ancient rock paintings can be found that, according to the Sanzhi people, have been the subject of investigation by several researchers from Russia. Unfortunately, I was not able to find literature on the paintings or the research expeditions. 
\begin{figure}[p] \caption{The village of Sanzhi in 2011 (courtesy of Gadzhimurad Gadzhimuradov)} \label{fig:Sanzhi 1} \includegraphics[height=.4\textheight]{figures/Sanzhi_1.JPG} \end{figure} \begin{figure}[p] \caption{The village of Sanzhi in 2013 (courtesy of Iwona Kaliszewska)} \label{fig:Sanzhi 2} \includegraphics[width=\textwidth]{figures/Sanzhi_2.JPG} \end{figure} \begin{figure}[p] \caption{An old picture of Sanzhi, around 1957 (courtesy of the Sanzhi community)} \label{fig:Sanzhi 3} \includegraphics[width=\textwidth]{figures/Sanzhi_3.JPG} \end{figure} \begin{figure}[p] \caption{The village of Druzhba in the winter of 2014 (picture by Diana Forker)} \label{fig:Druzhba} \includegraphics[height=.4\textheight]{figures/Druzhba.jpg} \end{figure} From 1968 onwards, within a relatively short time span, all Sanzhi people moved to the lowlands to ethnically and linguistically mixed settlements. The major reason for the resettlement was the difficult life in the mountains. There was and still is no road leading to Sanzhi, and also no electricity. From grade five on, children had to walk by foot to the school in Itsari every day and in all weathers. Today, the majority of Sanzhi speakers live in the village of Druzhba in the Dagestanian lowlands (Kayakentskiy Rayon) (\reffig{fig:Druzhba}) and to a lesser extent in other settlements in Dagestan and other parts of Russia. Druzhba is an ethnically and linguistically heterogeneous settlement with speakers of other South Dargwa varieties, other East Caucasian languages such as Tabasaran, Agul, Lezgian, and Lak, and also a very few Kumyk (Turkic) and Russian speakers. In Druzhba, people make a living by working in the local vineyards that used to be part of a \textit{sovkhoz} (Soviet state farm). Many inhabitants, especially men, commute to other parts of Russia to work there and support their families back home. A map of Dagestan with Sanzhi and Druzhba is given in \reffig{fig:Map 2}. \begin{figure} \caption{Map of Dagestan} \label{fig:Map 2} \includegraphics[width=\textwidth]{figures/Dagestan_Sanzhi.png} \end{figure} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{The sociolinguistic situation of Sanzhi}\label{sec:The sociolinguistic situation of Sanzhi}\largerpage All languages of the Republic of Dagestan are official languages, but only 14 of them have the status of being officially written languages. Sanzhi Dargwa, like many other comparatively small languages and varieties spoken on the territory of Dagestan, does not belong to the written languages. Before the arrival of Russian in the remote parts of the central Dagestanian mountains, where the original village of Sanzhi is located, Kumyk served as the language of interethnic communication in the wider area. The main traces of contact with Kumyk are the numerous Turkic loan words (e.g. the first part in \textit{ač barq'ij} `open' originates from the Kumyk verb \textit{ač-maq}, \textit{baχča} `garden' (identical in Kumyk), \textit{qːʷaz} `goose' from Kumyk \textit{qaz}, and many more). Nevertheless, among the Sanzhi speakers with whom I worked, nobody claimed to have a significant command of Kumyk. 
All villages, except for one\footnote{The exception is the village of Shara that was originally inhabited by speakers of Agul, but today it is also a Dargwa village according to my Sanzhi assistant.} in the immediate neighborhood of Sanzhi, are Dargwa villages with Dargwa varieties closely related to Sanzhi, so that communication was and still is easily possible just by sticking to one's own variety. \begin{figure} \caption{Sanzhi men at the Uraza Bayram, the holiday at the end of Ramadan in 2013 (Gadzhimurad Gadzhimuradov, who is dressed in dark clothes, is standing on the left side) (picture by Diana Forker)} \label{fig:SanzhiPeople} \includegraphics[width=\textwidth]{figures/8_uraza2013.jpg} \end{figure} Today, all Sanzhi speakers are bilingual or multilingual to various extents because they know at least some Russian. Russian serves as the main language of interethnic communication and is the only language used in education and administration, and more generally in the public sphere in Dagestan. The degree of bilingualism varies from speaker to speaker, but simplifying somewhat, it is possible to say that women of the oldest generation (60 years and older) are the only group for whom Sanzhi is the dominant language. Men of the oldest generation as well as many members of the middle generation (age 30 to 60) are more or less balanced bilinguals, and use the two languages in accordance with the different functional domains (public/official vs. private/speech community). All members of the youngest generation are dominant in Russian, but everybody has at least a passive command of Sanzhi and is able to use a simplified form of the language in communication with members of the oldest generation, e.g. in interaction between grandchildren and grandparents. Thus, the contact situation is largely language maintenance for the oldest and middle generation. Among the youngest generation, language shift is observable, and it is reasonable to assume that members of the youngest generation in particular who are still children today will not pass on Sanzhi to their children. Some children and young people in Druzhba still learn Sanzhi as their first language (this depends on the family situation), but they come in contact with Russian right from the first day of their life. Russian becomes the dominant language at the latest when children start attending kindergarten. Therefore, they generally have a limited and mostly passive command of Sanzhi and prefer to speak only Russian. Sanzhi people of the young generation, including small children, speak predominantly Russian with each other. More and more Sanzhi people speak Russian not only to their neighbors in Druzhba, many of whom are from other ethnic groups, but even at home. Although the people have a positive language attitude and are proud of speaking their own language, Russian is considered to be not only more prestigious, but extremely necessary for the future of their children (see \citealt{ForkerSubmitteda} for more information). Another factor influencing the linguistic situation is marriage between women and men from different ethnic groups, which usually does not lead to bilingual children acquiring both the language of the mother and of the father, but to children speaking only Russian at home, as the parents use Russian to communicate with each other. I estimate that there are only a few families left in which both husband and wife are competent Sanzhi speakers that have grown up in the village of Sanzhi.
We can assume that in the past the situation must have been different and the vast majority of wives were either from Sanzhi or from the surrounding villages (Itsari, Chakhri, Kunki, Duakar, Dzilebki are the main villages of origins of mothers and wives of the Sanzhi speakers with whom I worked). Since Sanzhi Dargwa is not employed in the public domain (e.g. administration, education, media, court) the language is unwritten and used only for oral communication within the Sanzhi community. The only printed material so far is \citet{Forker.Gadzhimuradov2017}, a collection of traditional stories and other texts. In school, Sanzhi children have around two hours of mother tongue education per week, during which they learn Standard Dargwa. Sanzhi speakers do not understand literary Standard Dargwa, because Akusha Dargwa, the base for the standard language, is a Northern Dargwa variety and quite different from Sanzhi. Therefore, in spite of the school classes, Sanzhi children usually do not learn Standard Dargwa well and are not able to speak, write, or read in Standard Dargwa, or make use of the few newspapers and TV programs that exist. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Genealogical affiliation} \label{sec:Genealogical affiliation} Sanzhi (Glottocode: sanz1248) belongs to the Dargwa (Dargi) languages, which form a subgroup of the East Caucasian (Nakh-Dagestanian) language family. The exact \isi{number} of languages belonging to this family is unknown, but it can be estimated to be around 40. The internal classification of the family has not yet been unanimously resolved. \reffig{fig:classificationtree} shows one of the possible classifications (namely the classification according to \citealt[xi]{Kibrik1996}). The internal division of the Dargwa branch into subvarieties is largely taken from \citet{Korjakov2006}. Dargwa languages are commonly divided into a Northern Dargwa group and a Southern Dargwa group, whereby Sanzhi belongs to the latter. The spelling of the names for languages and varieties in \reffig{fig:classificationtree} follows the conventions established in the literature and in the recent handbooks on East Caucasian languages \citep{PolinskyInPress, KoryakovEtAllInPreparation}. Unfortunately, in a few cases this leads to differences between the spelling of a village name and the spelling of the language spoken in it (e.g. the village of Itsari vs. Icari Dargwa). 
\begin{figure} \caption{A family tree of East Caucasian} \label{fig:classificationtree} \small \begin{itemize} \item[] Nakh branch \item[] \qquad\tit{Chechen, Ingush, Tsova-Tush (Batsbi)} \item[] Avar-Andic-Tsezic subbranch \item[] \qquad Avar-Andic \item[] \qquad\qquad \textit{Avar} \item[] \qquad\qquad Andic \item[] \qquad\qquad\qquad\tit{Andi, Botlikh, Godoberi, Karata, Akhvakh, Bagvalal,} \item[] \qquad\qquad\qquad\tit{Tindi, Chamalal} \item[] \qquad Tsezic subbranch \item[] \qquad\qquad\tit{Tsez, Hinuq, Khwarshi, Bezhta, Hunzib} \item[] Dargwa subbranch \item[] \qquad\tit{Akusha/Standard Dargwa, Urakhi, Mugi, Tsudakhar, Gapshima-Butri,} \item[] \qquad\tit{Mjurego-Gubden, Kadar, Muiri, Mehweb, Sirkhi, Amukh-Xuduc, Shiri,} \item[] \qquad\tit{Qunqi, Icari, \tbf{Sanzhi}, Chirag, Kajtag, Kubachi-Ashti} \item[] \tit{Lak} \item[] \tit{Khinalug} \item[] Lezgic subbranch \item[] \qquad\tit{Udi, Archi, Lezgian, Agul, Tabasaran, Tsakhur, Rutul, Kryz, Budugh} \end{itemize} \end{figure} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Dargwa languages and the problem of the \dqt{Dargwa ethnicity}} \label{sec:Dargwa languages and the problem of the Dargwa ethnicity} Today, all languages spoken in the Republic of Dagestan have the status of official languages (see article 11 of the constitution of Dagestan, 2003). This includes Standard Dargwa and Russian, among others. There is a distinction between the so-called ``unwritten'' and the ``written languages'' of Dagestan. The latter are (in addition to Russian) Avar, Agul, Azerbaijani, Kumyk, Lak, Lezgian, Noghay, Rutul, Tabasaran, Tat, Tsakhur, and Chechen. Written languages of Dagestan are, in principle, taught in school and used to some extent in the media (e.g. newspapers, journals). Until 1928, speakers of Dargwa varieties used the Arabic script, but there was no standard \isi{orthography}. From 1925 onwards, the first newspaper in a Dargwa language was published \citep[15]{Abdullaev1954}. This newspaper, as well as most books and other materials, was published in Akusha Dargwa, the language which was later chosen as the basis for the literary standard Dargwa language. There are several reasons for this choice: Akusha was and still is the Dargwa variety with the most speakers, and the village of Akusha together with the surrounding villages formed an autonomous center (\textit{vol'noe obščestvo}) for a long time. In 1930 at the first Dagestanian conference on \isi{orthography}, Akusha was appointed to be the basis for the literary standard Dargwa language. In 1928, a Latin alphabet was developed for a \isi{number} of Dagestanian languages including Dargwa, Avar, Lak, Lezgian, and Tabasaran. In 1938 the policy changed completely, and for all Dagestanian literary languages Cyrillic alphabets were introduced \citep[48\tnd51]{Grenoble2003}. In the following years the Dargwa alphabet underwent several changes. Dargwa people are officially considered to be one group that shares a common ethnicity, and to speak various dialects of one and the same Dargwa language (see below for the viewpoint of linguistics on this). According to the data of the Russian census from 2010, for instance, about 510\ths000 people consider themselves to be ethnic Dargwa, and thus represent the second biggest ethnic group in Dagestan (after the Avars).
The vast majority of them claim to speak Dargwa.\largerpage Dargwa languages are spoken in the central part of Dagestan (traditionally in the districts Akushinskiy, Levashinskiy, Dakhadayevskiy, Sergokalinskiy, Kaytagskiy, and also partially in the districts of Gunibskiy, Buynakskiy, Karabudakhkentskiy, and Agulskiy), in a territory with a length of about 100 km and a breadth of about 70 km (\reffig{fig:Map 3}). In the west, this area borders on Lak and Avar territory. In the north and east, the Dargwa area borders on Kumyk lands, and in the south on Tabasaran lands. \begin{figure}[t!] \caption{The East Caucasian (i.e. Nakh-Dagestanian) language family (map courtesy of Yura Koryakov)} \label{fig:Map 3} \includegraphics[scale=0.6, angle =90]{figures/NEC_color_2016.png} \end{figure} The term \textit{Dargwa} with its current reference was only introduced during Soviet times. There was a policy at the time to create names for peoples and languages that often lacked significance for the people themselves, and to introduce ethnic boundaries all over the Northern Caucasus \citep[114]{Grenoble2003}. The use of these names is nowadays fully established and is largely maintained for political reasons \citep{Shaxbanov2009}. Historically, the term \textit{Dargwa} (or \textit{Dargi}) does not refer to an ethnic group \citep[13]{Abdullaev1954}. There were seven unions of settlements in central Dagestan that referred to themselves with a proper name and the term \textit{Dargwa}: Akusha Dargwa, Bukun Dargwa, Gutsi Dargwa, Kaba Dargwa, Utsmi (or Kaytag) Dargwa, Khamur Dargwa, and Sirkha Dargwa \citep[13]{Magomedov1999}. That is, \textit{Dargwa} referred to settlement centers that consisted of a \isi{number} of small villages forming a unit, which were able to defend themselves and their own interests against enemies (\textit{vol'noe obščestvo}). Other urban centers in the north, like Kadar and Gubden, whose inhabitants are also considered to be Dargwa people today (and to speak Dargwa varieties), did not belong to those units to which the term \textit{Dargwa} was applied. They formed one administrative unit with Kumyk villages \citep[12]{Abdullaev1954}, and used Kumyk as their lingua franca (\citealt{DobrushinaDanielKoryakov}; \citealt[58\tnd59]{Wixman1980}). Similarly, there was not one single language with the name \textit{Dargwa}, but a group of related languages, in reference to which the names of the urban centers were used \citep[1]{Uslar1892}. But since Soviet times, the classification of the Dargwa varieties as dialects of one and the same Dargwa language has persisted in many publications and in all official documents (e.g. \citeb{Abdullaev1954}; \citeb{Gasanova1971}; \citeb{Museav2002}; WALS\footnote{\url{http://wals.info/}}; Ethnologue\footnote{\url{http://www.ethnologue.com/}}). Following the most recent publications on the internal classification of the East Caucasian language family \citep{Korjakov2006, Korjakov.Sumbatova2007}, the Dargwa branch consists of 19 languages and about 40 dialects (see \reffig{fig:classificationtree} above). The biggest are Akusha Dargwa (about 42\ths000 speakers), Mjurego-Gubden Dargwa (ca.~39\ths000), Urakhi Dargwa (ca.~35\ths 000), followed by Kajtag Dargwa (ca.~21\ths000), and Tsudakhar Dargwa (ca.~19\ths000). Speakers of many Dargwa languages do not understand speakers of other Dargwa varieties, and the variation between them is much bigger than between the Andic languages, another subbranch of the East Caucasian family. 
The break-up of the Proto-Dargwa language can be estimated to have occurred about two millennia ago (Sumbatova, p.c.). However, the exact \isi{number} of Dargwa languages is still subject to debate, because descriptions are lacking for many of the individual languages and dialects. Thus, \reffig{fig:classificationtree} will likely need to be corrected in the future. The place of the Dargwa languages inside the East Caucasian family is also debated. Some authors consider them to form a separate branch of the East Caucasian language family \citep[142]{Gigineishvili1977, Kibrik1996}, others group them together with Lak \citep{Haspelmath1993, Korjakov2006, vandenBerg2005}. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Typological overview} \label{sec:Typological overview} Sanzhi Dargwa is typologically similar to other East Caucasian languages. It has a relatively large consonant inventory including pharyngeal and \isi{ejective} \isi{consonants}, and a medium \isi{number} of vowels. With respect to its morphosyntactic structure, Sanzhi is predominantly dependent-marking with a rich case inventory. The \isi{grammatical cases} are \isi{ergative}, \isi{absolutive}, \isi{dative}, and \isi{genitive}. In addition, there is a plethora of \is{spatial case}spatial cases. The morphology is concatenative and predominantly suffixing. Sanzhi has an elaborate system of TAM forms. Verbal stems come in pairs that express imperfective and \isi{perfective aspect}, and many can take spatial \is{preverb}preverbs. Salient traits of the grammar are two largely independently operating agreement systems: \isi{gender}/\isi{number} agreement and \isi{person agreement}. Gender/\isi{number} agreement operates at the phrasal and at the clause level. Within the clause, it is mainly controlled by arguments in the \isi{absolutive} case and shows up on verbs, adverbs, and on \isi{nouns} in some of the \is{spatial case}spatial cases. Person agreement operates at the clausal level only, and functions according to a person hierarchy. Sanzhi has \isi{ergative} alignment at the level of morphology. SOV is the most frequent \isi{constituent order}. Features of Dargwa languages that have attracted the attention of typologists and linguists working within various theoretical frameworks include \isi{gender} and \isi{person agreement} \citep{Sumbatova2011, Sumbatova2013, Belyaev2013, Belyaev2017a, Belyaev2017b, GanenkovForthcoming, Forker2016a}, complement constructions including \isi{reported speech} \citep{Ganenkov2012, ForkerSubmittedb}, \isi{experiencer} constructions \citep{Comrie.vandenBerg2006, Ganenkov2006, Ganenkov2013}, local and \isi{long-distance reflexivization} \citep{Forker2014}, \isi{backward control} and long-distance agreement \citep{Serdobolskaya2009, Serdobolskaya2010, Belyaev2016}, the expression of space \citep{Ganenkov2010, ForkerLTSanzhi}, \isi{information structure} \citep{Sumbatova2009, Forker.Belyaev2016, Forker2016a}, and the problem of \isi{finiteness} \citep{Kalinina.Sumbatova2007}. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section[Literature and previous works]{Literature on Dargwa languages, Dargwa people, and previous works on Sanzhi}\largerpage \label{sec:Literature on Dargwa languages, Dargwa people, and previous works on Sanzhi} In comparison to some other Dagestanian languages, the description of Dargwa languages has a relatively long tradition. 
However, despite the impressive \isi{number} of monographs and articles that have been dedicated to various Dargwa languages, the scope and the quality of many of these works cannot satisfy modern scientific standards. Thus, in the following I will mention only those works that are still in use and represent valuable documentations and analyses of Dargwa. For a more detailed overview on the history of the study of Dargwa languages, see \citet{Magometov1983} and also the references in \citet{Temirbulatova2005}. The first scientific treatment of a Dargwa language (Urakhi) comes from \citet{Uslar1892}, who visited the Caucasus in the second half of the 19th century. The next key scholar is Said Abdullaev, who published a Russian-Dargwa (i.e. Akusha) dictionary and a grammar of Akusha \citep{Abdullaev1950, Abdullaev1954}. Since the 1950s, Saida Gasanova has written many articles and books about various Dargwa languages and dialects, concentrating mainly on Muiri, Mjuregi, Urakhi, and Tsudakhar \citep[e.g.][]{Gasanova1961, Gasanova1971}. Other important scholars are Zapir Abdullaev, who worked on Standard Dargwa and occasionally on Urakhi and Kajtag \citep[e.g.][]{Abdullaev1961, Abdullaev1969, Abdullaev1971, Abdullaev1986, Abdullaev1993, AbdullaevEtAl2014}, and Magomed-Said Musaev, who investigated various Dargwa varieties, including Chirag and Akusha \citep[e.g.][]{Musaev1975, Musaev1978, Musaev1983, Musaev1980a, Musaev1980b}. There are also works on Sikhi \citep{Kadibagomedov1998}, on Kajtag \citep{Temirbulatova2005} and most notably on Kubachi \citep{Magometov1963}. Recently, two new dictionaries have been published \citep{Jusupov2005, Jusupov2009}. Rasul Mutalov, one of the key participants in the language documentation project resulting in this grammar, has written a \isi{number} of papers and books on Icari Dargwa and Standard Dargwa \citep{Mutalov1992, Mutalov2002, Mutalov2018}. In \citey{vandenBerg1999}, the first book in English on a Dargwa language (Akusha), written by \citea{vandenBerg1999} was published, followed by a descriptive grammar of Icari Dargwa, which was co-authored by Nina Sumbatova and Rasul Mutalov \citep{Sumbatova.Mutalov2003}. Icari Dargwa is closely related to Sanzhi Dargwa; the two varieties are mutually intelligible and the Icari grammar was a fruitful source of inspiration for this grammar of Sanzhi. In Moscow, a group of linguists works on a \isi{number} of Dargwa languages, of which the major results are comprehensive studies of Tanti \citep{Sumbatova.Lander2014}, Shiri \citep{BelyaevInPreparation}, Mehweb \citep{DanielMehweb}, Ashti \citep{Belyaev2012} and Chirag \citep{GanenkovChiragSketch}. Other important works from the same group are \citet{Kalinina.Sumbatova2007}, \citet{Sumbatova2009, Sumbatova2010, Sumbatova2011, Sumbatova2013}, \citet{Lander2008, Lander2010}, and \citet{Serdobolskaya2009, Serdobolskaya2010}. \citet{SumbatovaInPreparation} provides a recent overview on Dargwa varieties. Sketch grammars in preparation include \citet{GanenkovChiragSketch} and \citet{ForkerSanzhiSketch}. Topics in the morphosyntax of Sanzhi and other aspects of Sanzhi have been treated in \citet{Forker2016a, Forker2014, Forker2019, ForkerSubmitteda, ForkerSubmittedb, ForkerSubmittedc}. A collection of texts with Russian translations and a Sanzhi-Russian and Russian-Sanzhi dictionary is \citet{Forker.Gadzhimuradov2017}. There is not much to say with respect to the ethnographic literature on Dargwa people. 
There are only two older monographs \citep{Schilling1949, Gadzieva.etal1967}. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Documenting and describing Sanzhi Dargwa}\label{sec:Documenting and describing Sanzhi Dargwa}\largerpage This grammar is the result of a language documentation project, \tit{Documenting Dargi languages in Dagestan \tnd\ Shiri and Sanzhi}, funded by the DoBeS program of the Volkswagen Foundation. The project officially started in 2012 and ran until 2019. Within this project, three linguists (Diana Forker, Rasul Mutalov, Oleg Belyaev), one anthropologist (Iwona Kaliszewska), and student assistants from the Universities of Bamberg and Leipzig (André M{\"u}ller, Teresa Klemm, and Felix Anker) documented, described, and analyzed the two endangered East Caucasian languages Shiri Dargwa and Sanzhi Dargwa. \sloppy Detailed information about the project, the languages and many texts, recordings and pictures can be found on the project website.\footnote{\url{http://www.kaukaz.net/dargwa/sanzhi/lexicon/index.htm}} All materials gathered in the project are accessible upon request via the Language Archive hosted by the MPI Nijmegen.\footnote{\url{http://dobes.mpi.nl/projects/shiri_sanzhi/}} The major results of the project are, in addition to the grammar of Sanzhi, a book with narratives, legends and other texts for the Sanzhi community \citep{Forker.Gadzhimuradov2017}, the electronic corpus of Sanzhi texts with audio recordings for every text and many video recordings (around 24 hours of natural speech), and an electronic dictionary. Around 15 hours of speech have been transcribed in ELAN, translated into Russian, and are deposited in the Language Archive.\footnote{\url{https://archive.mpi.nl/}} A subcorpus of around 10 hours, which amounts to more than 46\ths000 word tokens, has been fully glossed with FLEx\footnote{\url{https://software.sil.org/fieldworks/}} and translated into Russian and English. The texts have almost exclusively been recorded by myself in the village of Druzhba. During the recordings I was accompanied by Rasul Mutalov, my fellow project member, linguist and native speaker of the neighboring Icari dialect, or by Gadzhimurad Gadzhimuradov, my main language assistant, who led the conversation and explained the aims of the project to the Sanzhi speakers. After recording, the texts were transcribed in ELAN using a Cyrillic \isi{orthography} (page xvii) and with the help of native speakers. They also provided a Russian translation. In the ELAN file I added a Latin transliteration following the \isi{orthography}, which is also employed in this grammar (page xvii). From the transcribed texts I chose a subcorpus, transferred the Latin transcription into FLEx, glossed it and partially added English translations to the Russian translations. The glossed corpus has been put on the internet and is freely accessible.\footnote{\url{http://web-corpora.net/SanzhiDargwaCorpus/search/index.php?interface_language=en}} This corpus consists of 75 texts from 24 speakers of Sanzhi who were between 21 and 80 years old when the texts were recorded (mostly between 2012 and 2015). Only three of the speakers were 35 years or younger, whereas most were older than 50. Slightly more than half of the speakers were female, but the majority of texts originate from male speakers.
The corpus contains the following types of texts: \begin{itemize} \item 32 fairy tales, legends, anecdotes \item 8 fairy tales translated from Standard Dargwa and Russian \item 10 autobiographical narrations and texts about the history of the village \item 4 recipes and other instructions or procedural texts \item 3 poems \item 3 natural conversations \item 11 descriptions, conversations and narratives from the \textit{Family Problems Picture Task} \citep{SanRoqueEtAl2012} (additionally archived with PARADISEC, in the collection SocCog\footnote{\url{http://catalog.paradisec.org.au/collections/SocCog}}) \item 4 narrations produced by means of stimuli (two ``Pear Stories'', two stories ``Frog, where are you?'') \end{itemize} The natural data has been complemented by many hours of elicitation. Natural examples originating from the corpus are not further marked in this grammar. All examples which have been elicited are marked by (E). % https://corpus1.mpi.nl/ds/asv/;jsessionid=2E07D70EB3228292714D678A5572555D?0&openpath=node:1615060 \sloppy The electronic dictionary of Sanzhi was built up with Lexique Pro\footnote{\url{http://www.lexiquepro.com/}} and has been published with \textit{Dictionaria}.\footnote{\url{https://dictionaria.clld.org/contributions/sanzhi}} The dictionary contains around 5\ths500 entries written with Cyrillic and Latin script, Russian and English translations, grammatical information, and example sentences as well as audio recordings for (almost) every entry. The dictionary is also accessible via the project homepage.\footnote{\url{http://www.kaukaz.net/dargwa/sanzhi/lexicon/index.htm}} In August 2017, my main assistant Gadzhimurad Gadzhimuradov and I were able to print a book with community materials and present it to the Sanzhi community in Druzhba (\reffig{fig:SanzhiBook}). The book contains 42 texts of various genres taken from the corpus (fairy tales, legends, anecdotes, descriptions of games and recipes, oral history, and a poem) written in the Cyrillic Sanzhi script with a sentence-by-sentence translation in Russian, as well as a Sanzhi-Russian and a simplified Russian-Sanzhi dictionary, which is also available on the project website. Within the project I have undertaken more than ten field trips to Druzhba (including two short trips to Sanzhi in 2013 and 2016) in order to gather materials on the language. My major language assistant and consultant during all these years was and is Gadzhimurad Gadzhimuradov (\reffig{fig:SanzhiPeople}), a videographer and cameraman from Druzhba, who was born in Sanzhi. After spending his first five years there, his family moved to Druzhba, but he has ever since kept close relationships with the village and is a strong patriot in the best sense. Without the support and friendship of him and his family, in particular his wife Batichay, neither the grammar nor the entire project could have been realized. Gadzhimurad Gadzhimuradov not only helped me to gather, transcribe, and translate materials, he also made many recordings by himself, translated texts into Sanzhi and raised the interest of the Sanzhi community in the project. Patiently he sat down for endless hours with me to go through morphological and syntactic paradigms. This grammar could not have been written without his assistance. \begin{figure} \caption{Gadzhimurad Gadzhimuradov presenting the first book in Sanzhi (courtesy of Gadzhimurad Gadzhimuradov, 2017)} \label{fig:SanzhiBook} \includegraphics[width=\textwidth]{figures/SanzhiBooks1.jpg} \end{figure}
{ "alphanum_fraction": 0.7889047028, "avg_line_length": 152.1797235023, "ext": "tex", "hexsha": "adbc1e21429b74869d3614373ff6daccda6c4342", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "e9b8a5670dbc0500e17c80aae117a5975b14ecb4", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "langsci/250", "max_forks_repo_path": "chapters/introduction.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "e9b8a5670dbc0500e17c80aae117a5975b14ecb4", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "langsci/250", "max_issues_repo_path": "chapters/introduction.tex", "max_line_length": 1992, "max_stars_count": null, "max_stars_repo_head_hexsha": "e9b8a5670dbc0500e17c80aae117a5975b14ecb4", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "langsci/250", "max_stars_repo_path": "chapters/introduction.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 8599, "size": 33023 }
\documentclass[en,12pt]{elegantpaper} \begin{document} \section*{1} \noindent Since $\mu_i=\beta_1+\beta_2z_i$, we have \begin{align*} f(y_i|\mu_i,\sigma)&=\frac{1}{\sqrt{2\pi}\sigma}\exp\left(-\frac{(y_i-(\beta_1+\beta_2z_i))^2}{2\sigma^2}\right)\\ &=\frac{1}{\sqrt{2\pi}\sigma}\exp\left(-\frac{1}{2\sigma^2}y_i^2+\frac{\beta_1}{\sigma^2}y_i+\frac{\beta_2z_i}{\sigma^2}y_i-\frac{(\beta_1+\beta_2z_i)^2}{2\sigma^2}\right). \end{align*} The natural parameter is $\eta=(\frac{\beta_1}{\sigma^2}, \frac{\beta_2z_i}{\sigma^2}, -\frac{1}{2\sigma^2})$, where the last component is the coefficient of $y_i^2$. So, $\mathcal{E}=\mathbb{R}\times\mathbb{R}\times(-\infty,0)$. \end{document}
{ "alphanum_fraction": 0.6180124224, "avg_line_length": 58.5454545455, "ext": "tex", "hexsha": "fcf51a145b2628d84cd1d3b8f4f196556f844023", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "3af55f890c0dedfed7a4614665730002b4c3a370", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Addasecond86/MS-Stat-Tulane", "max_forks_repo_path": "Mathematical Statistics/midterm1/1.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "3af55f890c0dedfed7a4614665730002b4c3a370", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Addasecond86/MS-Stat-Tulane", "max_issues_repo_path": "Mathematical Statistics/midterm1/1.tex", "max_line_length": 180, "max_stars_count": null, "max_stars_repo_head_hexsha": "3af55f890c0dedfed7a4614665730002b4c3a370", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Addasecond86/MS-Stat-Tulane", "max_stars_repo_path": "Mathematical Statistics/midterm1/1.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 288, "size": 644 }
\section{\resheading{\textsc{Memberships}}} \vspace{8pt} % Reduce space between section title and contents \begin{multicols}{4} \begin{itemize} \item Agile Alliance % \item Agile Nashville \item ARRL \item ACM \item IEEE \end{itemize} \end{multicols}
{ "alphanum_fraction": 0.7579365079, "avg_line_length": 19.3846153846, "ext": "tex", "hexsha": "de510919b359081660f478a9b89195d0f6cec7be", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "905ccad09ad5344b27882fe24be1c4165eacf43f", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "asphaltbuffet/cookiecutter-latex-resume", "max_forks_repo_path": "{{cookiecutter.repo_name}}/memberships.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "905ccad09ad5344b27882fe24be1c4165eacf43f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "asphaltbuffet/cookiecutter-latex-resume", "max_issues_repo_path": "{{cookiecutter.repo_name}}/memberships.tex", "max_line_length": 62, "max_stars_count": null, "max_stars_repo_head_hexsha": "905ccad09ad5344b27882fe24be1c4165eacf43f", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "asphaltbuffet/cookiecutter-latex-resume", "max_stars_repo_path": "{{cookiecutter.repo_name}}/memberships.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 80, "size": 252 }
% How the representative set is computed; pruning strategies and verification % step goes here. \section{Computing Representative Sets} \label{sec:representative} A representative vertex $v$ of a pattern vertex $u$ implies that there exists an isomorphism $\phi$ for which $\phi(u) = v$. One way to interpret it is that the neighborhood of $u$ matches that of $v$. By comparing the neighborhoods we can find vertices that are not valid representatives of $u$ without trying to find an isomorphism exhaustively. Therefore, to compute the representative sets we will start with a candidate representative set denoted by \CR and iteratively prune some of the vertices if the neighborhoods cannot be matched. The candidate set is a superset of the representative set, $\CR \supseteq \RS$. An example of a candidate set is $ \CR = \{v| v \in \vg, \matij{C}{L(u)}{L(v)} \leq \alpha \}$, i.e., the vertices that match the single vertex pattern with label $L(u)$. In this section, we will describe different notions of neighborhood and show how they help us in computing the representative sets of vertices in a pattern. Checking whether a vertex $v \in R(u)$ involves solving an isomorphism problem, which is NP-complete. The pruning methods typically do not prune all the invalid vertices. So, we use an exhaustive enumeration method to prune these invalid vertices and reduce \CR to \RS. \subsection{\khop Label} %Neighbors of a vertex in a graph denotes the set of vertices that are %reachable via a single edge. The \khop label of a vertex $u$ is defined as the set of vertices that are reachable from $u$ via a simple path of length $k$. In other words, the k-hop label contains all vertices that are reachable in exactly $k$ hops starting from $u$ while visiting each vertex at most once. Note that we use the word label even though we refer to a set of vertices. Formally, the \khop label of a vertex $u$ in graph $G$ is $\khopl{k}{u,G} = \{v | v \in G, \kpath{u}{v}{k}\}$. We simply write it as $\khopl{k}{u}$ when the graph is evident from the context. For example, for pattern $P$ in Fig.~\ref{subfig:pattern}, the $0$-hop label of vertex $5$ is $h_0(5) = \{5\}$, its $1$-hop label is the multiset $h_1(5) = 2, 4, 6$ (we omit the set notation for convenience) and its $2$-hop label is $h_2(5) = 1, 3$. The minimum cost of matching the \khop labels $\khopl{k}{u}$ and $\khopl{k}{v}$ is \begin{equation} \label{eq:khop} \khopcost{k}{u}{v} = \text{min}\displaystyle\sum_{u' \in \khopl{k}{u}} \matij{C}{L(u')}{L(f(u'))} \end{equation} where the minimization is over all injective functions $f\!\!:\khopl{k}{u} \rightarrow \khopl{k}{v}$ and $\matij{C}{L(u')}{L(f(u'))}$ is the cost of matching the vertex labels. In other words, it is the minimum total cost of matching the vertices present in the k-hop labels. The following theorem places an upper bound on the minimum cost of matching the k-hop labels of a pattern vertex and any of its representative vertices. \begin{thm} Given any pattern vertex $u$, a representative vertex $v \in R(u)$ and cost threshold $\alpha$, the minimum cost of matching the \khop labels satisfies $\khopcost{k}{u}{v} \leq \alpha$ for all $k \geq 0$. \begin{myproof} Consider any isomorphism $\phi$ such that $\phi(u) = v$. It is enough to show an injective function $f\!\!:\khopl{k}{u} \rightarrow \khopl{k}{v}$ with a cost (as defined in equation \ref{eq:khop}) of at most $\alpha$. We will argue that the function $\phi$ restricted to the domain $\khopl{k}{u}$ is one such function $f$. First, we know that $\sum_{u \in \vp}\matij{C}{L(u)}{L(\phi(u))} \leq \alpha$, since $\phi$ is an isomorphism. Second, if $\kpath{u}{u'}{k}$, then $\phi(u') \in \khopl{k}{v}$, because for every edge $(u_1, u_2)$ on a path between $u$ and $u'$ in $\pat$, $(\phi(u_1),\phi(u_2)) \in \eg$. Therefore the minimum cost of matching the \khop labels is upper bounded by $\alpha$. \end{myproof} \label{thm:khop} \end{thm} Based on the above theorem, a vertex $v$ is not a representative vertex of $u$ if $\khopcost{k}{u}{v} > \alpha$ for any $k \geq 0$. However, in practice, it is enough to check the condition only for $k \leq |V_P|-1$ because $\khopl{k}{u}$ is the empty set $\forall k \geq |V_P|$ and the condition is trivially satisfied.
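To make the pruning rule concrete, a small Python sketch of the \khop label computation and of the matching cost in equation \ref{eq:khop} is given below. It is only an illustration: the pattern and database graphs are assumed to be adjacency lists (dictionaries mapping a vertex to its neighbors), the vertex labels and the label matching matrix $C$ are assumed to be dictionaries, and the assignment problem behind equation \ref{eq:khop} is solved with SciPy's \texttt{linear\_sum\_assignment}.

\begin{verbatim}
import numpy as np
from scipy.optimize import linear_sum_assignment

def k_hop_label(adj, u, k):
    """h_k(u): vertices reachable from u via a simple path with exactly k edges."""
    found = set()
    def dfs(node, depth, visited):
        if depth == k:
            found.add(node)
            return
        for w in adj[node]:
            if w not in visited:
                dfs(w, depth + 1, visited | {w})
    dfs(u, 0, {u})
    return sorted(found)

def k_hop_cost(adj_p, adj_g, label_p, label_g, C, u, v, k):
    """Minimum cost of matching h_k(u) (pattern) against h_k(v) (database graph)."""
    hu = k_hop_label(adj_p, u, k)
    hv = k_hop_label(adj_g, v, k)
    if len(hu) > len(hv):
        return float("inf")            # no injective mapping exists
    if not hu:
        return 0.0                     # nothing to match
    cost = np.array([[C[label_p[a]][label_g[b]] for b in hv] for a in hu])
    rows, cols = linear_sum_assignment(cost)   # optimal injective assignment
    return cost[rows, cols].sum()
\end{verbatim}

A vertex $v$ can then be dropped from the candidate set of $u$ as soon as \texttt{k\_hop\_cost} exceeds $\alpha$ for some $k \leq |V_P|-1$.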
Figure~\ref{fig:ncexample} shows an example of the \khop label based pruning of the candidate representative set, where the threshold is $\alpha = 0.5$. Consider vertex $2 \in \vp$ and vertex $20 \in \vg$. We have $\khopcost{0}{2}{20} = 0$, since the cost of matching the vertex labels is $\matij{C}{L(2)}{L(20)} = 0$, as per the label matching matrix $C$ in Fig.~\ref{subfig:match}. The \khop labels for $k=1,2,3$ and the minimum cost of matching them are shown in Table~\ref{tab:khop220}, and it can be verified that the minimum cost is within the threshold $\alpha$. Thus far, we cannot prune node $20$ from $R'(2)$. However, $\khopl{4}{2} = 4, 6$ and $\khopl{4}{20} = 30, 60$ and the minimum cost of matching them is $0.6 > \alpha$. Thus, we conclude that $20 \notin R'(2)$. This example illustrates that \khop labels can help prune the candidate representative sets. % Example for showing the incremental updates of the labels \begin{figure}[!ht] \captionsetup[subfloat]{captionskip=15pt} \centering \subfloat[Pattern $P$]{ \label{subfig:pattern} \scalebox{0.9}{ % pattern graph \begin{pspicture}(0,0)(4,3) \cnodeput[linecolor=black](0,2) {n1} {A} \cnodeput[linecolor=black](0,1) {n2} {C} \cnodeput[linecolor=black](1,0) {n3} {B} \cnodeput[linecolor=black](2,2) {n4} {C} \cnodeput[linecolor=black](2,1) {n5} {A} \cnodeput[linecolor=black](2,0) {n6} {D} %% Draw the edges of the pattern \ncline{-}{n1}{n2} \ncline{-}{n2}{n3} \ncline{-}{n3}{n4} \ncline{-}{n2}{n5} \ncline{-}{n4}{n5} \ncline{-}{n5}{n6} \ncline{-}{n3}{n6} \uput{.3cm}[90](n1){ {1} } \uput{.3cm}[180](n2){ {2} } \uput{.3cm}[270](n3){ {3} } \uput{.3cm}[90](n4){ {4} } \uput{.3cm}[0](n5){ {5} } \uput{.3cm}[270](n6){ {6} } \end{pspicture} } } \subfloat[Database Graph $G$]{ \label{subfig:database} \scalebox{0.9}{ \begin{pspicture}(0,0)(2.5,2.5) \cnodeput[linecolor=black](0,2) {N1} {A} \cnodeput[linecolor=black](0,1) {N2} {C} \cnodeput[linecolor=black](0,0) {N3} {D} \cnodeput[linecolor=black](1,2) {N4} {B} \cnodeput[linecolor=black](2.25,1) {N5} {B} \cnodeput[linecolor=black](2,0) {N6} {A} % vertex ids \uput{.3cm}[90](N1){ {10} } \uput{.3cm}[180](N2){ {20} } \uput{.3cm}[270](N3){ {30} } \uput{.3cm}[90](N4){ {40} } \uput{.3cm}[0](N5){ {50} } \uput{.3cm}[270](N6){ {60} } % edges in the database \ncline{-}{N1}{N2} \ncline{-}{N2}{N3} \ncline{-}{N4}{N5} \ncline{-}{N5}{N6} \ncline{-}{N3}{N4} \ncline{-}{N2}{N5} \ncline{-}{N2}{N6} \end{pspicture} } } \newline \captionsetup[subfloat]{captionskip=5pt} \subfloat[Cost Matrix]{ \label{subfig:match} % Table for the search space pruning \begin{tabular}{|c|c|c|c|c|} \hline $C$ & A & B & C & D \\ \hline A & 0 & 0.7 & 0.6 & 0.1\\ \hline B & 0.7 & 0 & 0.3 & 1\\ \hline C & 0.6 & 0.3 & 0 & 0.8\\ \hline D & 0.1 & 1 & 0.8 & 0\\ \hline \end{tabular} } \caption{Pattern \protect\subref{subfig:pattern}, database graph \protect\subref{subfig:database}, and cost matrix \protect\subref{subfig:match}.} \label{fig:ncexample} \end{figure}
\protect\subref{subfig:pattern}, database graph \protect\subref{subfig:database}, and cost matrix \protect\subref{subfig:match}. } \label{fig:ncexample} \end{figure} \begin{table}[h] \centering \begin{tabular}{|c|c|c|c|} \hline k & $\khopl{k}{2}$ & $\khopl{k}{20}$ & $\khopcost{k}{2}{20}$\\ \hline 1 & 1, 3, 5 & 10, 30, 50, 60 & 0 \\ 2 & 4, 6 & 40, 50, 60 & 0.4 \\ 3 & 3, 5 & 40, 30, 50 & 0.1\\ \hline \end{tabular} \caption{\khop label of vertices $2$ and $20$} \label{tab:khop220} \end{table} \begin{table}[h] \centering \begin{tabular}{|c|c|c|c|} \hline k & $h_k(3)$ & $h_k(50)$ & $\khopcost{k}{3}{50}$ \\ \hline 0 & 3 & 50 & $0$\\ 1 & 2, 4, 6 & 20, 40, 60 & $0.4$ \\ 2 & 1, 5 & 10, 20 , 30, 60 & 0\\ 3 & 2, 4, 5 & 10, 20, 30, 40 & $0.3$ \\ 4 & $1$ & $10, 40, 60$ & $0$ \\ \hline \end{tabular} \caption{\khop labels of vertices $3$ and $50$.} \label{tab:khop350} \end{table} \subsection{Neighbor Concatenated Label} In Neighbor concatenated label (\ncl) , the information regarding the candidates of a neighbor that were pruned in the previous iteration is used along with the current \khop label to prune candidates in the current iteration. In contrast, the \khop label pruning strategy for a vertex $u$ works independently of the result of \khop label pruning of other vertices in the pattern. This leads us to the following recursive formulation for \ncl. The \ncl of a vertex in the ${k+1}^{th}$ iteration, $\nclab{k+1}{u}$, is defined as the tuple $(\{\nclab{k}{u'} | u' \in N(u)\},\xspace \khopl{k+1}{u})$. The first element(A) of the tuple is the \ncl of the neighbors of the vertex $u$ in the previous iteration and the second element(B) is exactly same as the \khop label defined in the previous section. We say that $\nclab{k+1}{v}$ dominates $\nclab{k+1}{u}$, denoted by $\nclab{k+1}{u} = (A, B) \preceq \nclab{k+1}{v} = (A', B') $, i) iff $\khopcost{k+1}{B}{B'} \leq \alpha$ i.e., the minimum cost of matching the \khop labels is within $\alpha$ ii) there exits an injective function $g\!\!:A\rightarrow A'$ such that $a \preceq g(a)$ for all $a \in A$ i.e., there is a one to one mapping between the \ncl labels (in the previous iteration, $k$) of neighbors of $u$ and $v$. The base case $\nclab{0}{u} \preceq \nclab{0}{v}$ iff $\matij{C}{L(u)}{L(v)} \leq \alpha$. For example, in Fig~\ref{fig:ncexample} $\nclab{1}{2} \preceq \nclab{1}{20}$ because $\khopcost{1}{2}{20} \leq \alpha$ and the \ncl labels of vertices $1, 3, 5$ are dominated by the \ncl labels of vertices $10, 50, 30$ respectively. The following theorem states that the \ncl of a pattern vertex $u$ is dominated by the \ncl of any of its representative vertex $v \in R(u)$. \begin{thm} Given any pattern vertex $u$, a representative vertex $v \in R(u)$ and cost threshold $\alpha$, $\nclab{k}{u} \preceq \nclab{k}{v}$ for all $k \geq 0$. \begin{myproof} Let $\phi$ be any isomorphism such that $\phi(u) = v$. We prove the theorem by using induction on $k$.\\ \textbf{Base case:} $\nclab{0}{u} \preceq \nclab{0}{v} \iff \matij{C}{L(u)}{L(v)} \leq \alpha$ is true because $v \in R(u)$. \\ \textbf{Inductive Hypothesis:} Assume that $\nclab{k}{u} \preceq \nclab{k}{v}$ holds true for all $u \in \pat$ and $v \in R(u)$. \\ Now consider $\nclab{k+1}{u} = (A, B)$ and $ \nclab{k+1}{v} = (A', B') $, from theorem \ref{thm:khop} we know that $\khopcost{k+1}{B}{B'} \leq \alpha$, for all $k \geq 0$. Let $u'$ be a neighbor of $u$ and let $v' \in \phi(u')$ , then from the inductive hypothesis $\nclab{k}{u'} \preceq \nclab{k}{v'}$. 
Therefore, the injective function $\phi$ maps the elements $a \in A$ to $\phi(a) \in A'$. The theorem follows from the definition of the NL label. \end{myproof} \label{thm:ncl} \end{thm} Based on the above theorem, a vertex $v$ can be pruned from \CR if $\nclab{k}{u} \not\preceq \nclab{k}{v}$ for some $k \geq 0$. In Fig~\ref{fig:ncexample} , consider the vertices $3 \in \pat$ , $50 \in \db$ and let $\alpha = 0.5$. The \ncl labels, $\nclab{0}{3} \preceq \nclab{0}{50}$ as $\matij{C}{B}{B} = 0 \leq \alpha$. Similarly it is also true for the pairs $(2, 20)$, $(4, 40)$ etc. It follows that $\nclab{1}{3} \preceq \nclab{1}{50}$ as the neighbors $2, 4, 6$ can be mapped to $20, 40, 60$ respectively and the minimum cost of the matching the $1$-hop label is $0.4$ which is less than the $\alpha$ threshold. But $\nclab{2}{3} \not\preceq \nclab{2}{50}$ because the \ncl label $\nclab{1}{6}$ is not dominated by the \ncl label of $20, 40$ or $60$ in the previous iteration. So, there is no mapping between the neighbors of vertices $3$ and $50$ in the current iteration. Hence, the vertex $50$ can be pruned from the candidate representative set of vertex $3$. Note that using the \khop label in the same example will not prune the vertex $50$ because the minimum cost of matching the \khop labels is within $\alpha$ as shown in table \ref{tab:khop350}. Therefore, \ncl label is more efficient compared to \khop label as it subsumes the latter label. \subsection{Candidate set verification} \label{sec:verification} The pruning methods based on the \khop and the \ncl labels start with a \CR and prune some of the candidate vertices based on the conditions described in theorems \ref{thm:khop} and \ref{thm:ncl}. The verification step reduces \CR to \RS by retaining only those vertices for which there exists an isomorphism $\phi$ in which $\phi(u) = v$. Informally, it does this by checking if the pattern $P$ can be embedded at $v$ such that total cost of label mismatch is at most $\alpha$. Let $w_p = u_0,\ldots, u_m$ be a walk in the pattern that covers each edge of the pattern at least once starting from vertex $u$ i.e. $u_0 = u$. Finding a path that covers each edge at least once is a special case of the Chinese postman problem \cite{chinesepostman} where the distance between pair of vertices is one. The following three conditions are satisfied iff the vertex $v$ represents the vertex $u$. i) there exists a walk $w_d = v_0,\ldots, v_m$, $\forall (v_i, v_{i+1}) \in w_d$, $v_i \in R(u_i)$ and $v_{i+1} \in R(u_{i+1})$. ii) $v_i = v_j$ implies that $u_i = u_j$. iii) $\sum\matij{C}{L(u_i)}{L(v_i)} \leq \alpha$ where $u_i \in \vp$. Unlike the \ncl label, these conditions are necessary and sufficient and can be verified by following the definition of isomorphism. Using an example we will show how these conditions can be used to check definitely whether $v \in R(u)$. Consider checking whether the vertex $30$ is a valid representative of the vertex $1$ in the pattern in the figure~\ref{subfig:ex_sub} and let $\alpha = 0.5$ . The walk $ w_p = 1, 2, 4, 3, 1$ covers each edge of the pattern at least once. A walk $w_d$, in the database in the figure~\ref{subfig:ex_db}, that satisfies the above three conditions should start by mapping the vertex $1$ to $30$. The total cost of matching the labels till now is $0.2$ and the budget available for matching other vertices in $P$ is $0.5 - 0.2 = 0.3$. We will cover the walk $w_p$ one edge at a time. 
For any $(u_i, u_{i+1}) \in w_p$, if $u_i$ and $u_{i+1}$ are both already
mapped, then we verify that there is an edge between the database vertices
$v_i$ and $v_{i+1}$ to which the pattern vertices are mapped. If, however,
$u_{i+1}$ is not mapped, then we map it to some vertex
$v_{i+1} \in R'(u_{i+1})$ and subtract the cost of this mapping from the
remaining $\alpha$ budget. For example, in the first step $(1,2)$ we can map
$2$ to $20$. The remaining cost is then $0.3 - 0.2 = 0.1$. In the next step
$(2,3)$, the vertex $3$ cannot be mapped to any vertex in the database without
violating the third condition. In such a case, we backtrack one step and
choose a different mapping, say $10$, for the vertex $2$, which is the last
vertex that was mapped. Proceeding this way, we can arrive at the mapping
corresponding to $\phi_{1}$ as in Table~\ref{subfig:ex_occur}. This
isomorphism not only guarantees that $30 \in R(1)$, it also implies that the
verification checks between the pairs $(2, 10), (3, 60)$ and $(4, 40)$ can be
avoided because of the approximate isomorphism $\phi_1$ that was found. The
above procedure can be extended to enumerate the complete set of isomorphisms.

\subsection{Label costs and dominance checking}
\label{sec:labelcheck}
Candidate representative vertices are pruned by checking for the dominance
relation between the \ncl label of a pattern vertex and that of a candidate
vertex in the database. Comparing the \ncl labels requires i) computing the
cost of matching the \khop labels and ii) matching the neighbors of the
pattern vertex with the neighbors of the candidate vertex. The first problem
can be formulated as a minimum cost maximum flow problem in a network and the
second as a maximum matching in a bipartite graph.

\medskip{\textit{Computing \khop label cost}:} To compute the minimum matching
cost between the \khop labels $\khopl{k}{u}$ and $\khopl{k}{v}$, we compute
the maximum flow with minimum cost in a flow network $F$ defined as follows.
Each edge in $F$ is associated with a maximum capacity and the cost of sending
a unit flow across it. The network contains a vertex for each label
$l_u = L(u')$ where $u' \in h_k(u)$ and a vertex for each label $l_v = L(v')$
where $v' \in h_k(v)$. There is a directed edge between the source vertex
($s$) and each $l_u$ with a zero cost and a capacity equal to the multiplicity
of $l_u$, i.e., the number of vertices in $h_k(u)$ that have the label $l_u$.
Similarly, there is a directed edge between each $l_v$ and the sink node
($t$). In addition, there is a directed edge from $l_u$ to $l_v$ with a cost
equal to $\matij{C}{l_u}{l_v}$ and a capacity equal to the multiplicity of
$l_u$. The cost between the \khop labels is equal to the minimum cost of the
maximum flow if the maximum flow is equal to $|\khopl{k}{u}|$, and $\infty$
otherwise. \\
Figure~\ref{fig:Hflow} shows the flow network required to compute the minimum
cost of matching the \khop labels $\khopl{2}{2} = 4, 6$ and
$\khopl{2}{20} = 40, 50, 60$ as shown in Table~\ref{tab:khop220}. The labels
of the vertices in these \khop labels are $C, D$ and $B, B, A$ respectively.
The capacity of the edge between $B$ and $t$ is two because both the vertices
$40$ and $50$ have the same label $B$. There is an edge from $s$ to each of
$C, D$ with zero cost and a maximum capacity of one. Similarly, there is an
edge from each of $A, B$ to the sink vertex $t$ with zero cost and a maximum
capacity of one and two respectively. There is an edge from $C, D$ to each of
$A, B$ with cost equal to the corresponding entry in the cost matrix $C$.
The maximum flow in the network is two, and the minimum cost of sending two
units of flow, $0.4$, is achieved by pushing a unit of flow along each of the
paths $s, C, B, t$ and $s, D, A, t$. Therefore, the cost of matching the
labels $\khopl{2}{2}$ and $\khopl{2}{20}$ is $0.4$. This implies that the
vertex $4$ with label $C$ can be matched to either $40$ or $50$ and the vertex
$6$ to $60$.

\medskip{\textit{Dominance check}: } Consider the \ncl labels
$\nclab{k+1}{u} = (A, B)$ and $\nclab{k+1}{v} = (A', B')$. The cost of
matching $B$ and $B'$ can be computed using the above network formulation.
Finding an injective function $f\!\!:A \rightarrow A'$ such that
$a \preceq f(a)$ is equivalent to finding a matching of size $|N(u)|$ in the
bipartite graph with edges $(a, a')$ for all $a \in A$ and $a \preceq a'$.
The \ncl label $\nclab{k}{v}$ therefore dominates $\nclab{k}{u}$ if the cost
between the \khop labels is within $\alpha$ and the size of the maximum
bipartite matching is $|N(u)|$.

\medskip{\textit{Optimization}: } The candidate pattern may contain groups of
symmetric vertices that are indistinguishable with respect to the \khop
label. In such a scenario, the candidate representative sets of all these
vertices are exactly the same. Utilizing the symmetry, we can apply the label
pruning strategy to only one vertex per symmetry group and replicate the
results for all other vertices in the group. For example, the vertices $10$
and $40$ in Figure~\ref{subfig:ex_sub} are symmetric and the representative
sets $R(10)$ and $R(40)$ are exactly the same. In abstract algebra terms,
such groups are called orbits of the graph and can be computed using the
nauty algorithm \cite{nauty}. Even though computing the orbits is expensive,
we can avoid $(|g|-1) \times |\CR|$ \ncl label cost computations, where
$|g|$ is the size of an orbit. Note that we find the orbits only for the
pattern, which is usually very small compared to the database graph.
%Note that the payoff is zero if all the vertex orbits are of size $1$.

\subsection{Precomputing database \khop labels}
The \khop labels of the database vertices are independent of the candidate
pattern. Also, the flow network used to compute the cost of matching the
\khop labels requires only aggregate information about the number of vertices
with a given label. Hence, we can precompute the \khop labels of the database
vertices and store them in memory. The following theorem shows that computing
the \khop label is computationally hard.

\begin{thm}
k-reachable (KR): Given a graph $G$, an integer $k$, and $u \in \vg$, compute
$\khopl{k}{u}$. KR cannot be solved in polynomial time unless $P = NP$.
\begin{myproof}
We prove this by reducing Hamiltonian path (HP) to KR. Hamiltonian Path:
Given a graph $G$, is there a simple path of length $|\vg|-1$, i.e., a path
that visits each and every vertex exactly once? The problem of finding a
Hamiltonian path is known to be NP-complete \cite{npcomplete}.\\
Assume that an algorithm $X(k)$ can compute KR in polynomial time. Let
$|\vg| = n$ and let $u$ be the starting vertex of the Hamiltonian path, if it
exists. Given an instance of HP, we first get a vertex $v$ with
$\kpath{u}{v}{n-1}$ using $X(n-1)$. The vertex $v$ is removed from the graph
and we find a vertex $v'$ such that $\kpath{u}{v'}{n-2}$ and
$(v', v) \in \eg$. We repeat this process $n-1$ times. If at any stage
$X(j) = \{\}$, then we restart from a different starting vertex. The vertices
selected in each iteration lie on a path of length $n-1$ if it exists.
If there were a polynomial time algorithm for KR, then HP could be solved in
polynomial time by reducing it to KR. Therefore, KR is at least as hard as
HP, and so KR cannot be solved in polynomial time unless $P = NP$.
\end{myproof}
\end{thm}

To compute the \khop label of a vertex $u$, we check for each vertex $v$
whether $\kpath{u}{v}{k}$ holds by enumerating all possible paths of length
$k$ until such a path is found. This procedure is exponential; we therefore
fix a maximum value $k_{max}$ and use the \khop label based pruning only for
values of $k \leq k_{max}$. It only takes a couple of minutes to compute the
\khop label for $k \leq 6$ for all the vertices in the database graph, which
is significantly less than the overall run time of the algorithm. Once
$\khopl{k}{u}$ is computed, we store in memory only the tuples $(m, l)$ where
$m$ is the number of vertices $u' \in \khopl{k}{u}$ with $L(u') = l$. The
total amount of main memory required to store the precomputed \khop labels is
$O(|\vg| \times |\Sigma| \times k_{max})$.

\begin{figure}[!h]
\centering
\scalebox{0.6}[0.6]{
\psset{unit=0.85in}
\newcommand\arc[4]{\ncline{#1}{#2}{#3}\ncput{\colorbox{gray!40}{#4}}}
\begin{pspicture}(0,1)(5,3)
\cnodeput[doubleline=true](1,2){src}{s}
\cnodeput(2,1){n1}{C}
\cnodeput(2,3){n2}{D}
\cnodeput[doubleline=true](5,2){sink}{t}
\cnodeput(4,1){n4}{B}
\cnodeput(4,3){n5}{A}
\arc{->}{src}{n1}{$1,0$}
\arc{->}{src}{n2}{$1,0$}
%\arc{->}{n1}{n4}{$1$}
\ncline{->}{n1}{n4}\ncput[npos=0.5]{\colorbox{gray!40}{$1,0.1$}}
\ncline{->}{n1}{n5}\ncput[npos=0.3]{\colorbox{gray!40}{$1,1$}}
\ncline{->}{n2}{n4}\ncput[npos=0.3]{\colorbox{gray!40}{$1,0.6$}}
\ncline{->}{n2}{n5}\ncput[npos=0.5]{\colorbox{gray!40}{$1,0.3$}}
\arc{->}{n4}{sink}{$2,0$}
\arc{->}{n5}{sink}{$1,0$}
%\arc{->}{n6}{sink}{$1$}
\end{pspicture}
}
\caption{Flow network for \khopl{2}{2} and \khopl{2}{20}}
\label{fig:Hflow}
\end{figure}
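To make the two primitives used in this section concrete, the listing below
sketches how a \khop label can be computed by bounded simple-path enumeration
and how the matching cost of Equation~\ref{eq:khop} can be evaluated. This is
an illustrative sketch rather than the implementation used in our experiments:
the graph is a plain adjacency dictionary, \texttt{label} maps a vertex to its
label, \texttt{C} is a nested mapping from label pairs to costs, and all
function and variable names (\texttt{khop\_label}, \texttt{khop\_cost},
\texttt{adj}) are ours. The injective matching is solved here with an
off-the-shelf rectangular assignment solver, which yields the same optimum as
the flow formulation described above.

{\small
\begin{verbatim}
# Illustrative sketch; not the implementation used for the reported results.
import numpy as np
from scipy.optimize import linear_sum_assignment

def khop_label(adj, u, k):
    """Vertices reachable from u via a simple path with exactly k edges."""
    reachable = set()
    def dfs(x, depth, visited):
        if depth == k:
            reachable.add(x)
            return
        for y in adj[x]:
            if y not in visited:          # simple path: no repeated vertices
                dfs(y, depth + 1, visited | {y})
    dfs(u, 0, {u})
    return reachable

def khop_cost(h_u, h_v, label, C):
    """Minimum cost of an injective matching of h_k(u) into h_k(v)."""
    h_u, h_v = sorted(h_u), sorted(h_v)
    if len(h_u) > len(h_v):               # no injective mapping exists
        return float("inf")
    cost = np.array([[C[label[a]][label[b]] for b in h_v] for a in h_u])
    rows, cols = linear_sum_assignment(cost)   # rectangular assignment
    return float(cost[rows, cols].sum())
\end{verbatim}
}

On the example of Figure~\ref{fig:ncexample}, these two functions should
reproduce the values reported in Tables~\ref{tab:khop220}
and~\ref{tab:khop350}, e.g., $\khopcost{4}{2}{20} = 0.6 > \alpha$, which
prunes vertex $20$ from $R'(2)$.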
{ "alphanum_fraction": 0.6718894786, "avg_line_length": 52.4105960265, "ext": "tex", "hexsha": "7cfb9759f847dc319649046c450fdc9a658e2026", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-05-08T11:17:33.000Z", "max_forks_repo_forks_event_min_datetime": "2020-05-08T11:17:33.000Z", "max_forks_repo_head_hexsha": "4bb1d78b52175add3955de47281c3ee0073c7943", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "PranayAnchuri/approx-graph-mining-with-label-costs", "max_forks_repo_path": "paper/representative.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "4bb1d78b52175add3955de47281c3ee0073c7943", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "PranayAnchuri/approx-graph-mining-with-label-costs", "max_issues_repo_path": "paper/representative.tex", "max_line_length": 89, "max_stars_count": null, "max_stars_repo_head_hexsha": "4bb1d78b52175add3955de47281c3ee0073c7943", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "PranayAnchuri/approx-graph-mining-with-label-costs", "max_stars_repo_path": "paper/representative.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 7958, "size": 23742 }
\hypertarget{section}{% \section{1}\label{section}} \bibverse{1} A revelation, Yahweh's+ 1:1 ``Yahweh'' is God's proper Name, sometimes rendered ``LORD'' (all caps) in other translations. word to Israel by Malachi. \bibverse{2} ``I have loved you,'' says Yahweh. Yet you say, ``How have you loved us?'' ``Wasn't Esau Jacob's brother?'' says Yahweh, ``Yet I loved Jacob; \bibverse{3} but Esau I hated, and made his mountains a desolation, and gave his heritage to the jackals of the wilderness.'' \bibverse{4} Whereas Edom says, ``We are beaten down, but we will return and build the waste places,'' Yahweh of Armies says, ``They shall build, but I will throw down; and men will call them `The Wicked Land,' even the people against whom Yahweh shows wrath forever.'' \bibverse{5} Your eyes will see, and you will say, ``Yahweh is great---even beyond the border of Israel!'' \bibverse{6} ``A son honors his father, and a servant his master. If I am a father, then where is my honor? And if I am a master, where is the respect due me?'' says Yahweh of Armies to you priests who despise my name. ``You say, `How have we despised your name?' \bibverse{7} You offer polluted bread on my altar. You say, `How have we polluted you?' In that you say, `Yahweh's table is contemptible.' \bibverse{8} When you offer the blind for sacrifice, isn't that evil? And when you offer the lame and sick, isn't that evil? Present it now to your governor! Will he be pleased with you? Or will he accept your person?'' says Yahweh of Armies. \bibverse{9} ``Now, please entreat the favor of God,+ 1:9 The Hebrew word rendered ``God'' is ``אֱלֹהִ֑ים'' (Elohim). that he may be gracious to us. With this, will he accept any of you?'' says Yahweh of Armies. \bibverse{10} ``Oh that there were one among you who would shut the doors, that you might not kindle fire on my altar in vain! I have no pleasure in you,'' says Yahweh of Armies, ``neither will I accept an offering at your hand. \bibverse{11} For from the rising of the sun even to its going down, my name is great among the nations, and in every place incense will be offered to my name, and a pure offering; for my name is great among the nations,'' says Yahweh of Armies. \bibverse{12} ``But you profane it when you say, `Yahweh's table is polluted, and its fruit, even its food, is contemptible.' \bibverse{13} You say also, `Behold,+ 1:13 ``Behold'', from ``הִנֵּה'', means look at, take notice, observe, see, or gaze at. It is often used as an interjection. what a weariness it is!' And you have sniffed at it'', says Yahweh of Armies; ``and you have brought that which was taken by violence, the lame, and the sick; thus you bring the offering. Should I accept this at your hand?'' says Yahweh. \bibverse{14} ``But the deceiver is cursed who has in his flock a male, and vows and sacrifices to the Lord+ 1:14 The word translated ``Lord'' is ``Adonai.'' a defective thing; for I am a great King,'' says Yahweh of Armies, ``and my name is awesome among the nations.'' \hypertarget{section-1}{% \section{2}\label{section-1}} \bibverse{1} ``Now, you priests, this commandment is for you. \bibverse{2} If you will not listen, and if you will not take it to heart, to give glory to my name,'' says Yahweh of Armies, ``then I will send the curse on you, and I will curse your blessings. Indeed, I have cursed them already, because you do not take it to heart. 
\bibverse{3} Behold, I will rebuke your offspring,+ 2:3 or, seed and will spread dung on your faces, even the dung of your feasts; and you will be taken away with it. \bibverse{4} You will know that I have sent this commandment to you, that my covenant may be with Levi,'' says Yahweh of Armies. \bibverse{5} ``My covenant was with him of life and peace; and I gave them to him that he might be reverent toward me; and he was reverent toward me, and stood in awe of my name. \bibverse{6} The law of truth was in his mouth, and unrighteousness was not found in his lips. He walked with me in peace and uprightness, and turned many away from iniquity. \bibverse{7} For the priest's lips should keep knowledge, and they should seek the law at his mouth; for he is the messenger of Yahweh of Armies. \bibverse{8} But you have turned away from the path. You have caused many to stumble in the law. You have corrupted the covenant of Levi,'' says Yahweh of Armies. \bibverse{9} ``Therefore I have also made you contemptible and wicked before all the people, according to the way you have not kept my ways, but have had respect for persons in the law. \bibverse{10} Don't we all have one father? Hasn't one God created us? Why do we deal treacherously every man against his brother, profaning the covenant of our fathers? \bibverse{11} Judah has dealt treacherously, and an abomination is committed in Israel and in Jerusalem; for Judah has profaned the holiness of Yahweh which he loves, and has married the daughter of a foreign god. \bibverse{12} Yahweh will cut off the man who does this, him who wakes and him who answers, out of the tents of Jacob and him who offers an offering to Yahweh of Armies. \bibverse{13} ``This again you do: you cover Yahweh's altar with tears, with weeping, and with sighing, because he doesn't regard the offering any more, neither receives it with good will at your hand. \bibverse{14} Yet you say, `Why?' Because Yahweh has been witness between you and the wife of your youth, against whom you have dealt treacherously, though she is your companion and the wife of your covenant. \bibverse{15} Did he not make you one, although he had the residue of the Spirit? Why one? He sought godly offspring. Therefore take heed to your spirit, and let no one deal treacherously against the wife of his youth. \bibverse{16} One who hates and divorces'', says Yahweh, the God of Israel, ``covers his garment with violence!'' says Yahweh of Armies. ``Therefore pay attention to your spirit, that you don't be unfaithful. \bibverse{17} You have wearied Yahweh with your words. Yet you say, `How have we wearied him?' In that you say, `Everyone who does evil is good in Yahweh's sight, and he delights in them;' or `Where is the God of justice?' \hypertarget{section-2}{% \section{3}\label{section-2}} \bibverse{1} ``Behold, I send my messenger, and he will prepare the way before me! The Lord, whom you seek, will suddenly come to his temple. Behold, the messenger of the covenant, whom you desire, is coming!'' says Yahweh of Armies. \bibverse{2} ``But who can endure the day of his coming? And who will stand when he appears? For he is like a refiner's fire, and like launderers' soap; \bibverse{3} and he will sit as a refiner and purifier of silver, and he will purify the sons of Levi, and refine them as gold and silver; and they shall offer to Yahweh offerings in righteousness. \bibverse{4} Then the offering of Judah and Jerusalem will be pleasant to Yahweh as in the days of old and as in ancient years. 
\bibverse{5} I will come near to you to judgment. I will be a swift witness against the sorcerers, against the adulterers, against the perjurers, and against those who oppress the hireling in his wages, the widow, and the fatherless, and who deprive the foreigner of justice, and don't fear me,'' says Yahweh of Armies. \bibverse{6} ``For I, Yahweh, don't change; therefore you, sons of Jacob, are not consumed. \bibverse{7} From the days of your fathers you have turned away from my ordinances and have not kept them. Return to me, and I will return to you,'' says Yahweh of Armies. ``But you say, `How shall we return?' \bibverse{8} Will a man rob God? Yet you rob me! But you say, `How have we robbed you?' In tithes and offerings. \bibverse{9} You are cursed with the curse; for you rob me, even this whole nation. \bibverse{10} Bring the whole tithe into the storehouse, that there may be food in my house, and test me now in this,'' says Yahweh of Armies, ``if I will not open you the windows of heaven, and pour you out a blessing, that there will not be enough room for. \bibverse{11} I will rebuke the devourer for your sakes, and he shall not destroy the fruits of your ground; neither shall your vine cast its fruit before its time in the field,'' says Yahweh of Armies. \bibverse{12} ``All nations shall call you blessed, for you will be a delightful land,'' says Yahweh of Armies. \bibverse{13} ``Your words have been harsh against me,'' says Yahweh. ``Yet you say, `What have we spoken against you?' \bibverse{14} You have said, `It is vain to serve God,' and `What profit is it that we have followed his instructions and that we have walked mournfully before Yahweh of Armies? \bibverse{15} Now we call the proud happy; yes, those who work wickedness are built up; yes, they tempt God, and escape.' \bibverse{16} Then those who feared Yahweh spoke one with another; and Yahweh listened and heard, and a book of memory was written before him for those who feared Yahweh and who honored his name. \bibverse{17} They shall be mine,'' says Yahweh of Armies, ``my own possession in the day that I make. I will spare them, as a man spares his own son who serves him. \bibverse{18} Then you shall return and discern between the righteous and the wicked, between him who serves God and him who doesn't serve him. \hypertarget{section-3}{% \section{4}\label{section-3}} \bibverse{1} ``For behold, the day comes, burning like a furnace, when all the proud and all who work wickedness will be stubble. The day that comes will burn them up,'' says Yahweh of Armies, ``so that it will leave them neither root nor branch. \bibverse{2} But to you who fear my name shall the sun of righteousness arise with healing in its wings. You will go out and leap like calves of the stall. \bibverse{3} You shall tread down the wicked; for they will be ashes under the soles of your feet in the day that I make,'' says Yahweh of Armies. \bibverse{4} ``Remember the law of Moses my servant, which I commanded to him in Horeb for all Israel, even statutes and ordinances. \bibverse{5} Behold, I will send you Elijah the prophet before the great and terrible day of Yahweh comes. \bibverse{6} He will turn the hearts of the fathers to the children and the hearts of the children to their fathers, lest I come and strike the earth with a curse.''
{ "alphanum_fraction": 0.752359638, "avg_line_length": 55.8532608696, "ext": "tex", "hexsha": "a138d19b49444b56b23bd31f0439d1ee6710e5f3", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "039ab9b18364ecade1d56695cb77c40ee62b1317", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "bibliadelpueblo/BibliaLibre", "max_forks_repo_path": "Bibles/English.WorldEnglishBibleUS/out/tex/41-Malachi.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "039ab9b18364ecade1d56695cb77c40ee62b1317", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "bibliadelpueblo/BibliaLibre", "max_issues_repo_path": "Bibles/English.WorldEnglishBibleUS/out/tex/41-Malachi.tex", "max_line_length": 72, "max_stars_count": null, "max_stars_repo_head_hexsha": "039ab9b18364ecade1d56695cb77c40ee62b1317", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "bibliadelpueblo/BibliaLibre", "max_stars_repo_path": "Bibles/English.WorldEnglishBibleUS/out/tex/41-Malachi.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2897, "size": 10277 }
% -*- mode: latex; TeX-master: "Vorbis_I_spec"; -*- %!TEX root = Vorbis_I_spec.tex % $Id$ \section*{Colophon} \includegraphics{xifish} \label{footer} %\TODO{display xifish.pdf, [Xiph.org logo]} Ogg is a \href{http://www.xiph.org/}{Xiph.org Foundation} effort to protect essential tenets of Internet multimedia from corporate hostage-taking; Open Source is the net's greatest tool to keep everyone honest. See \href{http://www.xiph.org/about.html}{About the Xiph.org Foundation} for details. Ogg Vorbis is the first Ogg audio CODEC. Anyone may freely use and distribute the Ogg and Vorbis specification, whether in a private, public or corporate capacity. However, the Xiph.org Foundation and the Ogg project (xiph.org) reserve the right to set the Ogg Vorbis specification and certify specification compliance. Xiph.org's Vorbis software CODEC implementation is distributed under a BSD-like license. This does not restrict third parties from distributing independent implementations of Vorbis software under other licenses. Ogg, Vorbis, Xiph.org Foundation and their logos are trademarks (tm) of the \href{http://www.xiph.org/}{Xiph.org Foundation}. These pages are copyright (C) 1994-2007 Xiph.org Foundation. All rights reserved. This document is set using \LaTeX.
{ "alphanum_fraction": 0.7785602504, "avg_line_length": 38.7272727273, "ext": "tex", "hexsha": "df5a289216acff4f578788d6cdc6addd01b8b9fc", "lang": "TeX", "max_forks_count": 96, "max_forks_repo_forks_event_max_datetime": "2022-01-20T19:52:19.000Z", "max_forks_repo_forks_event_min_datetime": "2015-11-22T07:47:26.000Z", "max_forks_repo_head_hexsha": "de559619fd4dd0d2d9608436696fd44bdf74eba8", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "yinquan529/platform-external-libvorbis", "max_forks_repo_path": "doc/footer.tex", "max_issues_count": 374, "max_issues_repo_head_hexsha": "de559619fd4dd0d2d9608436696fd44bdf74eba8", "max_issues_repo_issues_event_max_datetime": "2021-12-17T14:18:08.000Z", "max_issues_repo_issues_event_min_datetime": "2015-11-03T12:37:22.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "yinquan529/platform-external-libvorbis", "max_issues_repo_path": "doc/footer.tex", "max_line_length": 70, "max_stars_count": 278, "max_stars_repo_head_hexsha": "de559619fd4dd0d2d9608436696fd44bdf74eba8", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "yinquan529/platform-external-libvorbis", "max_stars_repo_path": "doc/footer.tex", "max_stars_repo_stars_event_max_datetime": "2022-01-20T18:21:05.000Z", "max_stars_repo_stars_event_min_datetime": "2015-11-03T03:01:20.000Z", "num_tokens": 338, "size": 1278 }
% This template has been tested with LLNCS DOCUMENT CLASS -- version 2.20 (24-JUN-2015) %"runningheads" enables: % - page number on page 2 onwards % - title/authors on even/odd pages %This is good for other readers to enable proper archiving among other papers and pointing to %content. Even if the title page states the title, when printed and stored in a folder, when %blindly opening the folder, one could hit not the title page, but an arbitrary page. Therefore, %it is good to have title printed on the pages, too. \documentclass[runningheads,a4paper]{llncs}[2015/06/24] %cmap has to be loaded before any font package (such as cfr-lm) \usepackage{cmap} \usepackage[T1]{fontenc} \usepackage{graphicx} %Even though `american`, `english` and `USenglish` are synonyms for babel package (according to https://tex.stackexchange.com/questions/12775/babel-english-american-usenglish), the llncs document class is prepared to avoid the overriding of certain names (such as "Abstract." -> "Abstract" or "Fig." -> "Figure") when using `english`, but not when using the other 2. %english has to go last to set it as default language \usepackage[ngerman,english]{babel} %Hint by http://tex.stackexchange.com/a/321066/9075 -> enable "= as dashes \addto\extrasenglish{\languageshorthands{ngerman}\useshorthands{"}} %better font, similar to the default springer font %cfr-lm is preferred over lmodern. Reasoning at http://tex.stackexchange.com/a/247543/9075 \usepackage[% rm={oldstyle=false,proportional=true},% sf={oldstyle=false,proportional=true},% tt={oldstyle=false,proportional=true,variable=true},% qt=false% ]{cfr-lm} % %if more space is needed, exchange cfr-lm by mathptmx %\usepackage{mathptmx} %for demonstration purposes only \usepackage[math]{blindtext} %Sorts the citations in the brackets %It also allows \cite{refa, refb}. Otherwise, the document does not compile. % Error message: "White space in argument" \usepackage{cite} %% If you need packages for other papers, %% START COPYING HERE %% COPY ALSO cmap and fontenc from lines 10 to 12 %extended enumerate, such as \begin{compactenum} \usepackage{paralist} %put figures inside a text %\usepackage{picins} %use %\piccaptioninside %\piccaption{...} %\parpic[r]{\includegraphics ...} %Text... %for easy quotations: \enquote{text} \usepackage{csquotes} %enable margin kerning \usepackage{microtype} %tweak \url{...} \usepackage{url} %\urlstyle{same} %improve wrapping of URLs - hint by http://tex.stackexchange.com/a/10419/9075 \makeatletter \g@addto@macro{\UrlBreaks}{\UrlOrds} \makeatother %nicer // - solution by http://tex.stackexchange.com/a/98470/9075 %DO NOT ACTIVATE -> prevents line breaks %\makeatletter %\def\Url@twoslashes{\mathchar`\/\@ifnextchar/{\kern-.2em}{}} %\g@addto@macro\UrlSpecials{\do\/{\Url@twoslashes}} %\makeatother %diagonal lines in a table - http://tex.stackexchange.com/questions/17745/diagonal-lines-in-table-cell %slashbox is not available in texlive (due to licensing) and also gives bad results. 
This, we use diagbox %\usepackage{diagbox} %required for pdfcomment later \usepackage{xcolor} %enable nice comments %this also loads hyperref \usepackage{pdfcomment} %enable hyperref without colors and without bookmarks \hypersetup{hidelinks, colorlinks=true, allcolors=black, pdfstartview=Fit, breaklinks=true} %enables correct jumping to figures when referencing \usepackage[all]{hypcap} \newcommand{\commentontext}[2]{\colorbox{yellow!60}{#1}\pdfcomment[color={0.234 0.867 0.211},hoffset=-6pt,voffset=10pt,opacity=0.5]{#2}} \newcommand{\commentatside}[1]{\pdfcomment[color={0.045 0.278 0.643},icon=Note]{#1}} %compatibality with packages todo, easy-todo, todonotes \newcommand{\todo}[1]{\commentatside{#1}} %compatiblity with package fixmetodonotes \newcommand{\TODO}[1]{\commentatside{#1}} %enable \cref{...} and \Cref{...} instead of \ref: Type of reference included in the link \usepackage[capitalise,nameinlink]{cleveref} %Nice formats for \cref \crefname{section}{Sect.}{Sect.} \Crefname{section}{Section}{Sections} \usepackage{xspace} %\newcommand{\eg}{e.\,g.\xspace} %\newcommand{\ie}{i.\,e.\xspace} \newcommand{\eg}{e.\,g.,\ } \newcommand{\ie}{i.\,e.,\ } %introduce \powerset - hint by http://matheplanet.com/matheplanet/nuke/html/viewtopic.php?topic=136492&post_id=997377 \DeclareFontFamily{U}{MnSymbolC}{} \DeclareSymbolFont{MnSyC}{U}{MnSymbolC}{m}{n} \DeclareFontShape{U}{MnSymbolC}{m}{n}{ <-6> MnSymbolC5 <6-7> MnSymbolC6 <7-8> MnSymbolC7 <8-9> MnSymbolC8 <9-10> MnSymbolC9 <10-12> MnSymbolC10 <12-> MnSymbolC12% }{} \DeclareMathSymbol{\powerset}{\mathord}{MnSyC}{180} % correct bad hyphenation here \hyphenation{op-tical net-works semi-conduc-tor} %% END COPYING HERE \begin{document} \title{Paper Title} %If Title is too long, use \titlerunning %\titlerunning{Short Title} %Single insitute \author{Firstname Lastname \and Firstname Lastname} %If there are too many authors, use \authorrunning %\authorrunning{First Author et al.} \institute{Institute} %Multiple insitutes %Currently disabled % \iffalse %Multiple institutes are typeset as follows: \author{Firstname Lastname\inst{1} \and Firstname Lastname\inst{2} } %If there are too many authors, use \authorrunning %\authorrunning{First Author et al.} \institute{ Insitute 1\\ \email{...}\and Insitute 2\\ \email{...} } \fi \maketitle \begin{abstract} Abstract goes here \end{abstract} \begin{keywords} keyword1, keyword2 \end{keywords} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Introduction}\label{sec:intro} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \blindtext\todo{Refine me} Winery~\cite{Winery} is graphical \commentontext{modeling}{modeling with one \enquote{l}, because of AE} tool. \begin{figure} Simple Figure \caption{Simple Figure} \label{fig:simple} \end{figure} \begin{table} \caption{Simple Table} \label{tab:simple} Simple Table \end{table} cref Demonstration: Cref at beginning of sentence, cref in all other cases. \Cref{fig:simple} shows a simple fact, although \cref{fig:simple} could also show something else. \Cref{tab:simple} shows a simple fact, although \cref{tab:simple} could also show something else. \Cref{sec:intro} shows a simple fact, although \cref{sec:intro} could also show something else. Brackets work as designed: <test> The symbol for powerset is now correct: $\powerset$ and not a Weierstrass p ($\wp$). \begin{inparaenum} \item All these items... \item ...appear in one line \item This is enabled by the paralist package. 
\end{inparaenum} ``something in quotes'' using plain tex or use \enquote{the enquote command}. You can now write words containing hyphens which are hyphenated (application"=specific) at other places. This is enabled by an additional configuration of the babel package. In case you write \enquote{application-specific}, then the word will only be hyphenated at the dash. You can also write applica\allowbreak{}tion-specific, but this is much more effort. \section{Conclusion and Outlook} \subsubsection*{Acknowledgments} ... In the bibliography, use \texttt{\textbackslash textsuperscript} for ``st'', ``nd'', ...: E.g., \enquote{The 2\textsuperscript{nd} conference on examples}. When you use \href{https://www.jabref.org}{JabRef}, you can use the clean up command to achieve that. See \url{https://help.jabref.org/en/CleanupEntries} for an overview of the cleanup functionality. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \bibliographystyle{splncs03} \bibliography{paper} All links were last followed on October 5, 2014. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \end{document}
{ "alphanum_fraction": 0.7216920299, "avg_line_length": 31.9094650206, "ext": "tex", "hexsha": "150924d6ed8f8f49396c4565bbb2e027eb2fc0a4", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "de91ad28a7bde621f37ef7666fd49bd29ff56be8", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "CNCA-CeNAT/comm-patterns-clustering", "max_forks_repo_path": "Paper/paper.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "de91ad28a7bde621f37ef7666fd49bd29ff56be8", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "CNCA-CeNAT/comm-patterns-clustering", "max_issues_repo_path": "Paper/paper.tex", "max_line_length": 366, "max_stars_count": null, "max_stars_repo_head_hexsha": "de91ad28a7bde621f37ef7666fd49bd29ff56be8", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "CNCA-CeNAT/comm-patterns-clustering", "max_stars_repo_path": "Paper/paper.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2233, "size": 7754 }
\documentclass[12pt]{article}
\usepackage[utf8]{inputenc}
\usepackage{comment}
\usepackage{listings}
\usepackage{mathtools}

\setlength{\parindent}{0em}
\setlength{\parskip}{0.5em}

\title{Block Two: The Information Layer}
\author{Yangtao Ge}
\date{\today}

\begin{document}

\maketitle

\section{Chapter 2: Binary Value and Number System}
\begin{abstract}
    This chapter describes binary values -- the way in which computer \textbf{hardware}
    represents and manages information. It also puts binary values in the context of
    number systems in general.
\end{abstract}

\subsection{Number and Computing}
Some definitions of numbers:
\begin{itemize}
    \item Number: A unit of an abstract mathematical system subject to
    \underline{the laws of arithmetic} (succession, addition and multiplication).
    \item Natural number: The number \textbf{0} and any number obtained by
    \underline{repeatedly adding 1} to it.
    \item Negative number: A value less than zero, with a sign opposite to its
    \textbf{positive counterpart}.
    \item Rational number: An integer or the \underline{quotient} of two integers
    (division by zero excluded).
\end{itemize}

\subsection{Positional Notation}
Some definitions related to base:
\begin{itemize}
    \item Base: The foundational value of a number system, which dictates the
    \textbf{number of digits} and the \textbf{value of each digit position}.
    \item Positional notation: A way of expressing numbers in different base systems
    in the following way:
    \begin{equation}
        d_n * R^{n-1} + d_{n-1} * R^{n-2} + ... + d_2 * R + d_1
    \end{equation}
    where \textbf{Base-R} has \textit{n} digits and $d_i$ represents the digit in the
    \textit{i}th position.
\end{itemize}

Watch out for the digits in a number, e.g. 2074 cannot have a base
\textbf{less than Base-8} because the digit 7 is used here.

\textbf{2 digits} are needed to represent the value of the base itself,
e.g. 10 is \underline{ten} in decimal, 10 is \underline{eight} in base 8,
and 10 is \underline{two} in binary.

The carry and borrow system also applies to other base systems. However, the value
represented by these carries and borrows is the \textbf{value of the base}.

All power-of-2 number systems can be converted to \textbf{binary}, and then to
\textbf{decimal}.
Examples are as follows: \begin{center} \underline{count every 4 digits for Hex} 1010110 = 101(5) \& 0110(6) \underline{count every three digits for Oct} 101010111100 = 101(5) \& 010(2) \& 111(7) \& 100(4) \end{center} Algorithm for Base 10 to Other Bases is as follows: \begin{lstlisting} WHILE (the quotient is not zero): Divide the decimal number by the new base Make the reminder the next digit to the left in the answer Replace the decimal number with the quotient \end{lstlisting} This algorithm shows that: \begin{itemize} \item The production of new number is \textbf{from right to left} \item Quotient is repeatedly used, reminder is the \textbf{answer} \end{itemize} some definitions about bit: \begin{itemize} \item binary digit: A digit in the \textbf{binary number} system \item bit: Binary digit \item byte: \textbf{Eight} binary digits \item word: A group of one or more \underline{bytes} \newline \emph{the number of bits in a word = word length of the computer} \end{itemize} \section{Chapter 3: Data Representation} \begin{abstract} This chapter includes how to store a certain type of information and represent in a computer environment \end{abstract} \subsection{Data and Computers} Some definitions related to data: \begin{itemize} \item Data: basic value and facts \item Information: Organized data and can provide \textbf{useful solutions} to problems \item Multimeadia: Sevral different media types i.e. Numbers, Text, Audio, images and etc. \item Bandwidth: The number of bits or bytes that can be transmitted from one place to another \underline{within a fixed time} \item Data compression: shrink the size of the data \item Compression ratio: \begin{equation} Ratio = \frac{Compressed\ Size}{Original\ Size} \end{equation} $0 < Ratio < 1$, closer to zero $\rightarrow$ tighter the compression \item Lossless: \underline{Without any Loss} in the process of compaction \item Lossy: \underline{Is lost} in the process of compaction \end{itemize} Real world is \textbf{infinite}, but computer is \textbf{finite} Some definitions about types of data: \begin{itemize} \item Analog data: A \textbf{continuous} representation of data e.g. mercury thermometer (\underline{smooth wave}) \item Digital data: A \textbf{discrete} representation of data e.g. 
button (\underline{square wave}) \end{itemize} In computer: \begin{itemize} \item Analog Data $\xrightarrow{\text{digitize}}$ Digital Data \item use \textbf{binary} system to represent them \end{itemize} Degraded: Electronic signals degrades as they move down a line (\textbf{Threshold}) Some definitions about Digital signals: \begin{itemize} \item Pulse-Code Modulation (PCM): Variation in a signal that jumps sharply between two \textbf{extremes} \item Reclocked: The act of reasserting an original digital signal before \textbf{too much degreadation occurs} \end{itemize} \underline{Analog vs Digital:} (need review) \begin{itemize} \item[\textbf{Analog}] degrades $\rightarrow$ in-range value $\rightarrow$ valid $\rightarrow$ information lost \item[\textbf{Digital}] degrades $\rightarrow$ PCM $\rightarrow$ high to low $\rightarrow$ reclocked $\rightarrow$ information saved \end{itemize} \emph{n} bits can represent $2^{n}$ things.\newline Increase the number of bits by 1 $\Rightarrow$ \textbf{double} the number of things we can represent \subsection{Representing Numeric Data} \subsubsection{Negative Values} \underline{The work flow is:}\newline Sign-Magnitude Representation $\rightarrow$ Fixed-sized Numbers $\rightarrow$ Two's Complement \begin{itemize} \item Sign-Magnitude Representation: ``value + sign'' \newline Problem: Will have \textbf{two} representation of 0 (+0 \& -0) \item Fixed-sized Numbers: use half of the integers to represent negatives \newline Method: Add the number together and \textbf{dicard} any carries \begin{equation} Negative(I) = 10^k - I \end{equation} Problem: Can't be represnet in computer \item Two's Complement: use certain number of bits to represent a integer and \underline{leftmost} one bit for representing \textbf{sign} e.g. -(2) is 11111110 \newline Method: \textbf{invert} the bits and \textbf{add 1} \begin{equation} Negative(I) = 2^k - I \end{equation} \end{itemize} \emph{Overflow} occurs when the value that we compute cannot fit into \underline{the number of bits} we have allocated for the result\newline e.g. 01111111(127) + 00000011(3) = 10000010(-126) is not +130 \subsubsection{Real Numbers} Different from Math: all noninteger values $\Leftrightarrow$ Real Number \emph{Radix} means the \textbf{dot} that separates the \underline{whole} part from the \underline{fractional} part in a real number in \textit{any base} \emph{Floating Point} means a representation of a real number that keeps track of the \textbf{sign}, \textbf{mantissa}, and \textbf{exponent} Base-10: \begin{equation} R = sign * mantissa * 10^{exp} \end{equation} Base-2: \begin{equation} R = sign * mantissa * 2^{exp} \end{equation} Floating Point needs 64 bits: $64 = 1(sign) + 11(exponent) + 52(mantissa)$ i.e. 
double precision \underline{Algorithm} Converting fractional parts from base-10 to other: \begin{lstlisting} WHILE (the fractional part is not zero): Multiply the fractional part by the new base Make the whole part the next digit to the left in the answer Replace the fractional part with the result of multiplication \end{lstlisting} Noticed that: \begin{itemize} \item it is possible that the loop will \textbf{never end} $\rightarrow$ precision problems \item instead of division, \textbf{multiplication} is used here \item More detail method of computing the Floating point is \textbf{NOT} included in this book \end{itemize} \subsection{Representing Text} \underline{Finite} number of characters $\rightarrow$ list all of them $\rightarrow$ represent in binary \newline But it is only \textbf{English}, Other language has other characters. \underline{ASCII} $\xrightarrow{Only\ for}$ English, \underline{Unicode} $\xrightarrow{comprimise}$ other language \emph{Character set} is a list of character and the codes used to represent each one. \subsubsection{Character Set} Two kinds of character sets are used: \begin{itemize} \item ASCII: \textbf{8} bits are used \item Unicode: \textbf{16} bits are used \end{itemize} Notice that: \begin{itemize} \item ASCII will not affect Unicode \item ASCII $\xrightarrow{Subset}$ Unicode \newline i.e. first eight bits are representing original ASCII \end{itemize} \subsubsection{Text Compression} Three kinds of ways are possible for text compression: \begin{itemize} \item Keyword Encoding: Replacing a frequently used word with a \textbf{single} character \newline Limitations: \begin{itemize} \item the character is already in the text $\rightarrow$ meaning confusing \item Upper \& Lower Case problem \item Frequent words are usually \textbf{short} \end{itemize} \item Run-length Encoding: Replacing a long series of a repeated character with a count of repetition\newline i.e. $String = Flag + Repetition + Times$ \newline e.g. AAAAAAA = *A7 \newline Limitations: \begin{itemize} \item worthless to encode repetitions less than \textbf{Three} \item Use ASCII digit to represent the ``Times'' \end{itemize} \item Huffman Encoding: Using a variable-length binary string to represent a character Limitations: \begin{itemize} \item one string cannot \textbf{prefix} the other string \item Encoding only focusing on particular text \end{itemize} \end{itemize} \subsection{Representing Audio Data} Sound is \textbf{Analog} $\xrightarrow{Digitalize}$ computer signals $\xrightarrow{sampling}$ distinct voltage levels $\rightarrow$ Hardware \emph{sampling} is periodically measure the voltage of the signal and record the appropriate numeric value MP3 is the \textbf{most common} audio format in the world, which employs both \underline{lossy} and \underline{lossless} compression \subsection{Representing Images and Graphics} Representing colour: \begin{itemize} \item \textit{HiColor}: 16-bit color depth\newline i.e. $C = R(5) + G(5) + B(5) + 1$ \item \textit{TrueColor}: 24-bit color depth\newline i.e. $C = R(8) + G(8) + B(8)$ (0-255 each) \end{itemize} Some definitions abot digital images and Graphics: \begin{itemize} \item Pixel: Individual dots used to represent a picture stands for picture element \item Resolution: The number of pixel used to represent a picture \item Raster-graphics format: storing image information pixel by pixel \newline e.g. 
GIF, BMP, JPEG \end{itemize} Four types of formats: \begin{itemize} \item BMP: Bitmap file\newline Characteristic: strightforward, very large(record colour \underline{pixel by pixel}) \item GIF: Graphics Interchange Format\newline Characteristic: \textbf{256} colours only, can do \textbf{animation} \item JPEG: reduings the size of image but more colourful \item PNG: Portable Network Graphics\newline Characteristic: Editable, but not animations \end{itemize} \subsection{Representing Video} \emph{Video codec} means COmpressor/DECompressor i.e. shrink the size and play on a computer or over Network Two types of compression(unimportant, detail needs references): \begin{itemize} \item temporal: Based on differences between \textbf{consecutive frames} \item spatial: Base on the same compression techniques used for still images \end{itemize} \end{document}
{ "alphanum_fraction": 0.7373149226, "avg_line_length": 41.1712328767, "ext": "tex", "hexsha": "00ddddc585ecaf17aaa6ee9b28fcb2f3e6ff997b", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "bdaef22d33e6355ace988c342de2198b4599e86c", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "YangtaoGe518/CompReadingNotes", "max_forks_repo_path": "CSIlluminated/Block2/Block2.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "bdaef22d33e6355ace988c342de2198b4599e86c", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "YangtaoGe518/CompReadingNotes", "max_issues_repo_path": "CSIlluminated/Block2/Block2.tex", "max_line_length": 148, "max_stars_count": null, "max_stars_repo_head_hexsha": "bdaef22d33e6355ace988c342de2198b4599e86c", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "YangtaoGe518/CompReadingNotes", "max_stars_repo_path": "CSIlluminated/Block2/Block2.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 3209, "size": 12022 }
\section{Experiments}\label{sec:Experiments}

All experiments were performed on the MNIST~\cite{LeCun:1998} and \fashmnist~\cite{FashionMNIST} datasets. The MNIST positive and negative classes were digits~``4'' and~``9'' respectively, while \fashmnist's positive and negative classes were coat and ankle boot respectively.

The \textit{propensity score},~${e(x)=\Pr[\xBase\in\Pos\vert x,y=1]}$, is the fraction of all positive-valued training examples that are labeled, i.e.,~are in~$\Pos$. All experiments in this section set ${e(x)=0.5}$. Any unlabeled positive-valued training examples were in~$\Unlabel$. The negative-valued training examples were similarly split into labeled and unlabeled examples. The unlabeled negative examples were placed in~$\Unlabel$, while the labeled negative examples formed a set,~$\Neg$, which was used for a supervised baseline as described in Section~\ref{sec:Experiments:Baseline}.

\toolname's encoder and decoders consisted of two hidden layers of 256~hidden neurons with ReLU activation. $\abs{\zP}=\abs{\zN}=5$ while ${\abs{\zS}=0}$. The learning rate was set to~$10^{-3}$. The PU~loss parameter was ${\lambda=1}$ for both datasets, while ${\alpha}$ was~$10^{-2}$ and ${3\cdot10^{-2}}$ for \fashmnist\ and MNIST respectively.

\subsection{Baseline}\label{sec:Experiments:Baseline}

\toolname's performance was compared against \elkan's algorithm. To the best of our knowledge, \elkan\ have never released an official implementation of their method, which necessitated that we develop our own implementation of their approach for this work.

\elkan's algorithm is built on top of any binary classifier that is \textit{well-calibrated}, i.e.~${\Pr[\hat{y} \vert x] \approx \Pr[y \vert x]}$. Most binary classifiers are not well-calibrated, with the calibration of neural networks being particularly poor~\cite{Guo:2017}. There are techniques that can transform a classifier to be well-calibrated, including Platt scaling~\cite{Platt:1999} and isotonic regression. The scope of this project necessitated the use of logistic regression as \elkan's underlying classifier since it is naturally well-calibrated.

A multilayer perceptron is significantly more expressive than a linear classifier, making the comparison in this section inherently unfair in our favor. For that reason, we also provide results for a supervised baseline using a neural network identical to \toolname's encoder plus a single output node. The fully-supervised training set consists of~$\Pos$ and a set of negative examples,~$\Neg$, disjoint from the inductive test set~$\Unlabel$.

Supervised learner hyperparameters can be naively tuned via a grid search. These well-trodden techniques are not immediately applicable to PU~learning since both the composition of a validation set and the learner quality metrics are non-obvious. This work therefore makes no attempt to optimize hyperparameters systematically; they were instead tuned on transductive accuracy, which represents a ceiling on our technique's performance.
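For reference, the listing below sketches our re-implementation of the \elkan\ baseline in its simplest form. It is an illustration rather than the exact code used to produce the reported numbers, images are assumed to be flattened into vectors, and all function and variable names (\texttt{fit\_elkan\_noto}, \texttt{X\_pos}, \texttt{X\_unlabeled}) are ours. A logistic regression model ${g(x)\approx\Pr[s=1 \vert x]}$, where $s$ indicates whether an example is labeled, is fit to separate labeled from unlabeled examples; the label frequency ${c=\Pr[s=1 \vert y=1]}$ is then estimated on held-out labeled positives, and ${\Pr[y=1 \vert x]}$ is recovered as ${g(x)/c}$.

{\small
\begin{verbatim}
# Simplified sketch of the Elkan & Noto baseline; not the exact code
# used for the reported results.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

def fit_elkan_noto(X_pos, X_unlabeled, seed=0):
    X = np.vstack([X_pos, X_unlabeled])
    s = np.concatenate([np.ones(len(X_pos)), np.zeros(len(X_unlabeled))])
    X_tr, X_hold, s_tr, s_hold = train_test_split(
        X, s, test_size=0.2, stratify=s, random_state=seed)
    g = LogisticRegression(max_iter=1000).fit(X_tr, s_tr)
    # c = average of g over held-out labeled positives, which estimates
    # the label frequency Pr[s = 1 | y = 1].
    c = g.predict_proba(X_hold[s_hold == 1])[:, 1].mean()
    return g, c

def predict_proba_pu(g, c, X):
    # Pr[y = 1 | x] = Pr[s = 1 | x] / c, clipped to a valid probability.
    return np.clip(g.predict_proba(X)[:, 1] / c, 0.0, 1.0)
\end{verbatim}
}

Predicted labels are obtained by thresholding the corrected probability at~$0.5$.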
\subsection{Quantitative Results}

\begin{table}[t]
  \centering
  \caption{Performance of \toolname, \elkan, and supervised learning for MNIST with positive class ``4'' and negative class ``9''}\label{tab:Experiments:MNIST}
  \input{tables/res_mnist.tex}
\end{table}

\begin{table}[t]
  \centering
  \caption{Performance of \toolname, \elkan, and supervised learning for \fashmnist\ with positive class ``coat'' and negative class ``ankle boot''}\label{tab:Experiments:FashionMNIST}
  \input{tables/res_fashion-mnist.tex}
\end{table}

\toolname's performance is judged against the baselines using the metrics of accuracy, area under the receiver operating characteristic curve (AUC ROC), and the F1~score. Tables~\ref{tab:Experiments:MNIST} and~\ref{tab:Experiments:FashionMNIST} contain the results for the MNIST and \fashmnist\ datasets respectively. \toolname\ outperforms \elkan\ across both datasets for all metrics.

A supervised learner will almost always outperform a PU~learner since the former has access to labeled data from the negative class. This holds in spite of the PU~learner treating the problem as transductive while the supervised learner treats it as inductive. \toolname\ is no exception to this rule. It is encouraging, though, that supervised learning's performance improvement was comparatively small, which underscores the merits of our algorithm.

\subsection{Decision Boundary Margin}\label{sec:Experiments:Margin}

Decision boundary margin is a common metric for measuring the quality of a binary classification algorithm. Example~$\xBase$ is labeled positive when ${\pHatDist<\nHatDist}$; otherwise $\xBase$~is labeled negative. Figure~\ref{fig:Experiments:UnlabelPlot} displays the positive and negative decoder losses for the MNIST experiment in Table~\ref{tab:Experiments:MNIST}. Each mark in the graph represents a single training example, where the shape and color indicate the mark's actual label and whether the example was in $\Pos$ or~$\Unlabel$. $\pHatDist$ is plotted on the $x$-axis while $\nHatDist$ is plotted on the $y$-axis. The decision boundary where ${\pHatDist=\nHatDist}$ is represented by the gray dashed line. As previously described, any point above this line is predicted positive while any point below the line is predicted negative. Ideally, all positive-valued examples (shown as blue circles for~$\Pos$ and red squares for~$\Unlabel$) would be in the upper left corner of the plot ---~${\pHatDist\ll\nHatDist}$~--- while negative-valued examples would be in the lower right corner ---~${\pHatDist\gg\nHatDist}$.

\begin{figure}[t]
  \centering
  \input{plots/scatter_separation.tex}
  \caption{Decision boundary margin for MNIST with ``4'' and ``9'' as positive \& negative classes respectively}\label{fig:Experiments:UnlabelPlot}
\end{figure}

Despite the high classification accuracy, the margin is quite low, with most samples clustered near the origin. As would be expected, the margin for negative examples (green triangles) is consistent and generally lower than that for positive examples. This is not unexpected given the absence of labeled negative data.
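The decision rule and margin shown in Figure~\ref{fig:Experiments:UnlabelPlot} reduce to a few lines of code. The sketch below is purely illustrative; the array names \texttt{loss\_p} and \texttt{loss\_n}, holding each example's reconstruction loss under the positive and negative decoder respectively, are ours and not part of \toolname's code base.

{\small
\begin{verbatim}
# Illustrative sketch of the decision rule and margin computation.
import numpy as np

def classify_with_margin(loss_p, loss_n):
    loss_p, loss_n = np.asarray(loss_p), np.asarray(loss_n)
    y_hat = np.where(loss_p < loss_n, 1, -1)  # positive iff closer to the P decoder
    # Distance to the boundary loss_p == loss_n (up to a factor of sqrt(2)).
    margin = np.abs(loss_n - loss_p)
    return y_hat, margin
\end{verbatim}
}

Plotting \texttt{loss\_p} on the $x$-axis against \texttt{loss\_n} on the $y$-axis then gives the layout of Figure~\ref{fig:Experiments:UnlabelPlot}.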
{ "alphanum_fraction": 0.7833005249, "avg_line_length": 117.2307692308, "ext": "tex", "hexsha": "0dd3c7d8b916cde55346bcd7aae32c5ee75f18ba", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "8b57f99c268ddb0c160266803ca96b3999beab4c", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ZaydH/cis572", "max_forks_repo_path": "project/final_report/experiments.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "8b57f99c268ddb0c160266803ca96b3999beab4c", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ZaydH/cis572", "max_issues_repo_path": "project/final_report/experiments.tex", "max_line_length": 597, "max_stars_count": null, "max_stars_repo_head_hexsha": "8b57f99c268ddb0c160266803ca96b3999beab4c", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ZaydH/cis572", "max_stars_repo_path": "project/final_report/experiments.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1474, "size": 6096 }
\subsection{Proposed work plan} \begin{frame} \frametitle{Progress chart} \begin{textblock*}{12.5cm}(0.1cm,2.2cm) % {block width} (coords) \begin{figure}[ht!] % replace 't' with 'b' to force it to \includegraphics[width=\textwidth]{./images/progress_chart.pdf} \caption{Workflow for the simulations proposed in this work.} \end{figure} \end{textblock*} \end{frame} \subsection{Stage 1: Basic on-line reprocessing demonstration} \begin{frame} \frametitle{MSBR online reprocessing analysis} \begin{columns} \column[t]{4.3cm} \begin{block}{SaltProc v0.1 demo for simple once-through \gls{MSR} reprocessing} \fontsize{7}{9}\selectfont \begin{itemize} \item Full-core model of the \gls{MSBR} for 60 years of operation \item FP removal from the salt with fixed, ideal extraction efficiencies \item $^{233}$Pa ideal removal and feed of an equal mass of $^{233}$U into the core \item Fresh fertile material feed to maintain the salt inventory \item Fine time resolution (3-day depletion steps) \end{itemize} \end{block} \column[t]{8cm} \begin{figure}[ht!] \centering \includegraphics[width=\textwidth]{../figures/keff_msbr.png} \caption{Effective multiplication factor dynamics for the full-core \gls{MSBR} model (reproduced from Rykhlevskii \emph{et al.} \cite{rykhlevskii_modeling_2019}).} \end{figure} \end{columns} \end{frame} \begin{frame} \frametitle{Effect of fission products removal} \begin{figure}[t] % replace 't' with 'b' to force it to \centering \includegraphics[width=0.8\textwidth]{../figures/keff_rem_cases.png} \caption{Calculated effective multiplication factor for the full-core \gls{MSBR} model with removal of various fission product groups over 10 years of operation (reproduced from Rykhlevskii \emph{et al.} \cite{rykhlevskii_modeling_2019}).} \end{figure} \end{frame} \subsection{Stage 2: Tool demonstration and validation for \gls{TAP}} \begin{frame} \frametitle{\gls{TAP} concept high-fidelity Serpent model} \begin{textblock*}{12.25cm}(0.25cm,1.8cm) % {block width} (coords) \begin{figure}[htp!] % replace 't' with 'b' to \includegraphics[width=\textwidth]{./images/tap_model.png} \caption{An $XY$ (left) and $XZ$ (right) section of the \gls{TAP} model. The violet color represents zirconium hydride, and the yellow represents fuel salt (reproduced from Rykhlevskii \& Huff \cite{rykhlevskii_milestone_2019}).} \end{figure} \end{textblock*} \end{frame} \begin{frame} \frametitle{Multi-component fuel reprocessing system model in SaltProc} \begin{columns} \column[t]{6cm} \begin{itemize} \item Fixed, non-ideal ($<100\%$) removal efficiencies \item Sparger and separator located in-line \item Static geometry with constant moderator-to-fuel ratio \item 5\% and 19.79\% low-enriched uranium feed \end{itemize} \column[t]{6.5cm} \begin{figure}[htp!] % replace 't' with 'b' to \centering \vspace{-7mm} \begin{overprint} \onslide<1>\includegraphics[height=0.8\textheight]{../figures/demo_reprocessing_scheme.png} \onslide<2>\includegraphics[height=0.8\textheight]{../figures/demo_reprocessing_scheme_2.png} \end{overprint} \caption{\gls{TAP} reprocessing scheme flowchart used for demonstration of SaltProc \cite{rykhlevskii_milestone_2019}.} \end{figure} \end{columns} \end{frame} \begin{frame} \frametitle{Depletion simulation results for TAP with various feeds} \begin{textblock*}{12.6cm}(0.1cm,2.2cm) % {block width} (coords) \begin{figure}[htp!] 
% replace 't' with 'b' to
          \begin{minipage}[b]{0.48\textwidth}
            \includegraphics[width=\linewidth]{../figures/keff_3.png}
          \end{minipage}
          \hspace{-2mm}
          \begin{minipage}[b]{0.48\textwidth}
            \includegraphics[width=\linewidth]{../figures/keff_zoomed_2.png}
          \end{minipage}
          \caption{Effective multiplication factor dynamics for the full-core \gls{TAP} model for different fueling scenarios over a 13-year reactor operation (left) and for the time interval from 367 to 471 days after startup (right). The confidence interval $\pm\sigma=28$~pcm is shaded.}
      \end{figure}
  \end{textblock*}
\end{frame}

\begin{frame}
  \frametitle{Fuel salt composition evolution during the TAP operation}
  \begin{textblock*}{12.25cm}(0.25cm,1.8cm) % {block width} (coords)
    \begin{figure}[htp!] % replace 't' with 'b' to
      \centering
      \vspace{-3mm}
      \includegraphics[width=0.72\textwidth]{../figures/u_pu_mass.png}
      \caption{Mass of major nuclides during 13 years of reactor operation with 19.79\% \gls{LEU} feed.}
    \end{figure}
  \end{textblock*}
\end{frame}

\subsection{Stage 5: Safety parameters evolution}
\begin{frame}
  \frametitle{Safety parameters calculations at start-up}
  \begin{columns}
    \column[t]{6cm}
      \begin{itemize}
        \item \gls{TAP} reactor operation range is 773--973~K
        \item Temperature coefficient of reactivity calculated separately for fuel and moderator in the range 800--1000~K
        \item Temperature coefficients are negative at start-up but are expected to become more negative during operation
        \item A configuration of 25 control rods has a reactivity worth of $1110\pm9.7$~pcm (1.1\%) at startup
        \item A Serpent template to calculate temperature coefficients and control rod worth in a single run was developed
      \end{itemize}
    \column[t]{6.5cm}
      \begin{figure}[bth!] % replace 't' with 'b' to
        \includegraphics[width=\textwidth]{../figures/axial_offset.png}
        \caption{\gls{TAP} model divided into multiple axial layers with different salt densities to calculate the axial power offset.}
      \end{figure}
  \end{columns}
\end{frame}
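\begin{frame}[fragile]
  \frametitle{Sketch: batch-wise removal between depletion steps}
  A minimal Python sketch of the batch-wise scheme from the Stage~1 demo: deplete the salt for a short step, scale each processed group by a fixed extraction efficiency, and compensate with feed. The group names, efficiency values, and the \texttt{deplete} call are illustrative assumptions, not SaltProc code.
  {\fontsize{6}{7}\selectfont
\begin{verbatim}
# Fixed extraction efficiencies (1.0 = ideal removal; <1.0 for non-ideal cases)
EFFICIENCY = {"Xe": 1.0, "Kr": 1.0, "Pa233": 1.0}
STEP_DAYS = 3

def reprocess_step(inventory, deplete, fertile_feed_rate):
    """One cycle: deplete, remove processed groups, and refill the salt."""
    inventory = deplete(inventory, STEP_DAYS)        # external depletion solver (assumed)
    removed = {g: inventory[g] * eff for g, eff in EFFICIENCY.items()}
    for group, mass in removed.items():
        inventory[group] -= mass                     # sent out of the primary loop
    inventory["U233"] += removed["Pa233"]            # feed 233U equal in mass to removed 233Pa
    inventory["Th232"] += fertile_feed_rate * STEP_DAYS  # fresh fertile feed keeps salt inventory
    return inventory
\end{verbatim}
  }
\end{frame}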
{ "alphanum_fraction": 0.7266263775, "avg_line_length": 33.4880952381, "ext": "tex", "hexsha": "765251007ec3759a24eb7b04d6ba76dc7b36c53b", "lang": "TeX", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2019-02-21T14:58:10.000Z", "max_forks_repo_forks_event_min_datetime": "2019-02-13T18:59:46.000Z", "max_forks_repo_head_hexsha": "941352e79cf93aece93fc95aa6945c1ca11719ca", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "arfc/2019-rykhl-siam", "max_forks_repo_path": "safety.tex", "max_issues_count": 3, "max_issues_repo_head_hexsha": "941352e79cf93aece93fc95aa6945c1ca11719ca", "max_issues_repo_issues_event_max_datetime": "2019-02-22T16:41:47.000Z", "max_issues_repo_issues_event_min_datetime": "2019-02-22T14:15:32.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "arfc/2019-rykhl-siam", "max_issues_repo_path": "safety.tex", "max_line_length": 94, "max_stars_count": null, "max_stars_repo_head_hexsha": "941352e79cf93aece93fc95aa6945c1ca11719ca", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "arfc/2019-rykhl-siam", "max_stars_repo_path": "safety.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1814, "size": 5626 }
\documentclass[11pt,a4paper,]{article} \usepackage{lmodern} \usepackage{amssymb,amsmath} \usepackage{ifxetex,ifluatex} \usepackage{fixltx2e} % provides \textsubscript \ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex \usepackage[T1]{fontenc} \usepackage[utf8]{inputenc} \else % if luatex or xelatex \usepackage{unicode-math} \defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase} \fi % use upquote if available, for straight quotes in verbatim environments \IfFileExists{upquote.sty}{\usepackage{upquote}}{} % use microtype if available \IfFileExists{microtype.sty}{% \usepackage[]{microtype} \UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts }{} \PassOptionsToPackage{hyphens}{url} % url is loaded by hyperref \usepackage[unicode=true]{hyperref} \hypersetup{ pdftitle={Migration Report}, pdfborder={0 0 0}, breaklinks=true} \urlstyle{same} % don't use monospace font for urls \usepackage{geometry} \geometry{a4paper, centering, text={16cm,24cm}} \usepackage[style=authoryear-comp,]{biblatex} \addbibresource{references.bib} \usepackage{longtable,booktabs} % Fix footnotes in tables (requires footnote package) \IfFileExists{footnote.sty}{\usepackage{footnote}\makesavenoteenv{long table}}{} \usepackage{graphicx,grffile} \makeatletter \def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi} \def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi} \makeatother % Scale images if necessary, so that they will not overflow the page % margins by default, and it is still possible to overwrite the defaults % using explicit options in \includegraphics[width, height, ...]{} \setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio} \IfFileExists{parskip.sty}{% \usepackage{parskip} }{% else \setlength{\parindent}{0pt} \setlength{\parskip}{6pt plus 2pt minus 1pt} } \setlength{\emergencystretch}{3em} % prevent overfull lines \providecommand{\tightlist}{% \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} \setcounter{secnumdepth}{5} % set default figure placement to htbp \makeatletter \def\fps@figure{htbp} \makeatother \title{Migration Report} %% MONASH STUFF %% CAPTIONS \RequirePackage{caption} \DeclareCaptionStyle{italic}[justification=centering] {labelfont={bf},textfont={it},labelsep=colon} \captionsetup[figure]{style=italic,format=hang,singlelinecheck=true} \captionsetup[table]{style=italic,format=hang,singlelinecheck=true} %% FONT \RequirePackage{bera} \RequirePackage[charter,expert,sfscaled]{mathdesign} \RequirePackage{fontawesome} %% HEADERS AND FOOTERS \RequirePackage{fancyhdr} \pagestyle{fancy} \rfoot{\Large\sffamily\raisebox{-0.1cm}{\textbf{\thepage}}} \makeatletter \lhead{\textsf{\expandafter{\@title}}} \makeatother \rhead{} \cfoot{} \setlength{\headheight}{15pt} \renewcommand{\headrulewidth}{0.4pt} \renewcommand{\footrulewidth}{0.4pt} \fancypagestyle{plain}{% \fancyhf{} % clear all header and footer fields \fancyfoot[C]{\sffamily\thepage} % except the center \renewcommand{\headrulewidth}{0pt} \renewcommand{\footrulewidth}{0pt}} %% MATHS \RequirePackage{bm,amsmath} \allowdisplaybreaks %% GRAPHICS \RequirePackage{graphicx} \setcounter{topnumber}{2} \setcounter{bottomnumber}{2} \setcounter{totalnumber}{4} \renewcommand{\topfraction}{0.85} \renewcommand{\bottomfraction}{0.85} \renewcommand{\textfraction}{0.15} \renewcommand{\floatpagefraction}{0.8} %\RequirePackage[section]{placeins} %% SECTION TITLES %% SECTION TITLES (NEW: Changing sections and subsections color) \RequirePackage[compact,sf,bf]{titlesec} 
\titleformat*{\section}{\Large\sf\bfseries\color[rgb]{0.8, 0.7, 0.1 }} \titleformat*{\subsection}{\large\sf\bfseries\color[rgb]{0.8, 0.7, 0.1 }} \titleformat*{\subsubsection}{\sf\bfseries\color[rgb]{0.8, 0.7, 0.1 }} \titlespacing{\section}{0pt}{2ex}{.5ex} \titlespacing{\subsection}{0pt}{1.5ex}{0ex} \titlespacing{\subsubsection}{0pt}{.5ex}{0ex} %% TITLE PAGE \def\Date{\number\day} \def\Month{\ifcase\month\or January\or February\or March\or April\or May\or June\or July\or August\or September\or October\or November\or December\fi} \def\Year{\number\year} %% LINE AND PAGE BREAKING \sloppy \clubpenalty = 10000 \widowpenalty = 10000 \brokenpenalty = 10000 \RequirePackage{microtype} %% PARAGRAPH BREAKS \setlength{\parskip}{1.4ex} \setlength{\parindent}{0em} %% HYPERLINKS \RequirePackage{xcolor} % Needed for links \definecolor{darkblue}{rgb}{0,0,.6} \RequirePackage{url} \makeatletter \@ifpackageloaded{hyperref}{}{\RequirePackage{hyperref}} \makeatother \hypersetup{ citecolor=0 0 0, breaklinks=true, bookmarksopen=true, bookmarksnumbered=true, linkcolor=darkblue, urlcolor=blue, citecolor=darkblue, colorlinks=true} \usepackage[showonlyrefs]{mathtools} \usepackage[no-weekday]{eukdate} %% BIBLIOGRAPHY \makeatletter \@ifpackageloaded{biblatex}{}{\usepackage[style=authoryear-comp, backend=biber, natbib=true]{biblatex}} \makeatother \ExecuteBibliographyOptions{bibencoding=utf8,minnames=1,maxnames=3, maxbibnames=99,dashed=false,terseinits=true,giveninits=true,uniquename=false,uniquelist=false,doi=false, isbn=false,url=true,sortcites=false} \DeclareFieldFormat{url}{\texttt{\url{#1}}} \DeclareFieldFormat[article]{pages}{#1} \DeclareFieldFormat[inproceedings]{pages}{\lowercase{pp.}#1} \DeclareFieldFormat[incollection]{pages}{\lowercase{pp.}#1} \DeclareFieldFormat[article]{volume}{\mkbibbold{#1}} \DeclareFieldFormat[article]{number}{\mkbibparens{#1}} \DeclareFieldFormat[article]{title}{\MakeCapital{#1}} \DeclareFieldFormat[article]{url}{} %\DeclareFieldFormat[book]{url}{} %\DeclareFieldFormat[inbook]{url}{} %\DeclareFieldFormat[incollection]{url}{} %\DeclareFieldFormat[inproceedings]{url}{} \DeclareFieldFormat[inproceedings]{title}{#1} \DeclareFieldFormat{shorthandwidth}{#1} %\DeclareFieldFormat{extrayear}{} % No dot before number of articles \usepackage{xpatch} \xpatchbibmacro{volume+number+eid}{\setunit*{\adddot}}{}{}{} % Remove In: for an article. 
\renewbibmacro{in:}{% \ifentrytype{article}{}{% \printtext{\bibstring{in}\intitlepunct}}} \AtEveryBibitem{\clearfield{month}} \AtEveryCitekey{\clearfield{month}} \makeatletter \DeclareDelimFormat[cbx@textcite]{nameyeardelim}{\addspace} \makeatother \author{\sf\Large\textbf{ Priya Ravindra Dingorkar}\\ {\sf\large BE in CE \& Pursing Master of Business Analytics\\[0.5cm]} \sf\Large\textbf{ Aarathy Babu}\\ {\sf\large B.Tech\\[0.5cm]} \sf\Large\textbf{ Junhao Wang}\\ {\sf\large B.Accounting\\[0.5cm]} \sf\Large\textbf{ Dilinie Seimon}\\ {\sf\large BSc.\\[0.5cm]}} \date{\sf\Date~\Month~\Year} \makeatletter \lfoot{\sf Dingorkar, Babu, Wang, Seimon: \@date} \makeatother %%%% PAGE STYLE FOR FRONT PAGE OF REPORTS \makeatletter \def\organization#1{\gdef\@organization{#1}} \def\telephone#1{\gdef\@telephone{#1}} \def\email#1{\gdef\@email{#1}} \makeatother \organization{ETC5513 - Collaborative and Reproducible Practices} \def\name{Department of\newline Econometrics \&\newline Business Statistics} \telephone{(03) 9905 2478} \email{[email protected]} %NEW: New email addresss \def\webaddress{\url{http://company.com/stats/consulting/}} %NEW: URl \def\abn{12 377 614 630} % NEW: ABN \def\logo{\includegraphics[width=6cm]{logo}} %NEW: Changing logo \def\extraspace{\vspace*{1.6cm}} \makeatletter \def\contactdetails{\faicon{phone} & \@telephone \\ \faicon{envelope} & \@email} \makeatother %%%% FRONT PAGE OF REPORTS \def\reporttype{Report for} \long\def\front#1#2#3{ \newpage \begin{singlespacing} \thispagestyle{empty} \vspace*{-1.4cm} \hspace*{-1.4cm} \hbox to 16cm{ \hbox to 6.5cm{\vbox to 14cm{\vbox to 25cm{ \logo \vfill \parbox{6.3cm}{\raggedright \sf\color[rgb]{0.8, 0.7, 0.1 } % NEW color {\large\textbf{\name}}\par \vspace{.7cm} \tabcolsep=0.12cm\sf\small \begin{tabular}{@{}ll@{}}\contactdetails \end{tabular} \vspace*{0.3cm}\par ABN: \abn\par } }\vss}\hss} \hspace*{0.2cm} \hbox to 1cm{\vbox to 14cm{\rule{4pt}{26.8cm}\vss}\hss\hfill} %NEW: Thicker line \hbox to 10cm{\vbox to 14cm{\vbox to 25cm{ \vspace*{3cm}\sf\raggedright \parbox{11cm}{\sf\raggedright\baselineskip=1.2cm \fontsize{24.88}{30}\color[rgb]{0, 0.29, 0.55}\sf\textbf{#1}} % NEW: title color blue \par \vfill \large \vbox{\parskip=0.8cm #2}\par \vspace*{2cm}\par \reporttype\\[0.3cm] \hbox{#3}%\\[2cm]\ \vspace*{1cm} {\large\sf\textbf{\Date~\Month~\Year}} }\vss} }} \end{singlespacing} \newpage } \makeatletter \def\titlepage{\front{\expandafter{\@title}}{\@author}{\@organization}} \makeatother \usepackage{setspace} \setstretch{1.5} \usepackage{float} \let\origfigure\figure \let\endorigfigure\endfigure \renewenvironment{figure}[1][2] { \expandafter\origfigure\expandafter[H] } { \endorigfigure }%% Any special functions or other packages can be loaded here. \usepackage{booktabs} \usepackage{longtable} \usepackage{array} \usepackage{multirow} \usepackage{wrapfig} \usepackage{float} \usepackage{colortbl} \usepackage{pdflscape} \usepackage{tabu} \usepackage{threeparttable} \usepackage{threeparttablex} \usepackage[normalem]{ulem} \usepackage{makecell} \usepackage{xcolor} \begin{document} \titlepage \section*{Acknowledgement} Our sincere gratitude goes out to Dr.Patricia Menéndez and the tutors; Stephanie Kobakian, Mark Dulhunty and Pablo Montero Manso for their guidance and support during the semester. Their culminative efforts have put us in a position where we are able to collaboratively produce this report while demonstrating our ability to use git tools. 
\pagebreak

\section*{Abstract}

There is no doubt that globalization is here to stay and that it continues to intensify and to reshape our identities and living spaces. \textbf{Migration} is one important factor that enhances globalization and drives cultural and socio-economic change on a large scale. This report focuses on a few aspects of migration.

\section*{Introduction}

Migration is the movement of people from one place to another with the intention of settling permanently or temporarily at a new location. With increased globalization, easy access to opportunities across the world and increased educational and skill levels of individuals, migration has become a common occurrence across the world. The impact of migration is an important factor affecting both the home country and the destination country of the migrant. The impact, however, varies with the reason for migration. Labor migration, skill-based migration, migration for education, marriage migration and migration of refugees are a few categories of migration based on the reason for migration.

Within the context of this report, the migration statistics for the years 2015 and 2016 will be analysed based on demographic factors of the migrants, their education and skill levels, their preference of countries to migrate to and the types of occupations they are most likely to migrate for. The analysis will compare countries, continents and populations within countries in deriving conclusions.

\begin{figure}
\centering
\includegraphics{data/image.png}
\caption{Migration Facts and Figures}
\end{figure}

\section*{Data Sources}

The Organisation for Economic Co-operation and Development (OECD) provides a dataset based on the population censuses of OECD countries around the world. The OECD and the World Bank also collaborate in providing an \href{http://www.oecd.org/els/mig/dioc.htm}{extensive dataset on the immigrants from OECD countries to non-OECD countries}. The dataset from the Database on Immigrants in OECD and non-OECD countries for the years 2015-2016 will be used in the analysis within this report. The dataset includes information such as demographic characteristics of the immigrants, their duration of stay, labor market outcomes, fields of study, educational attainment and their place of birth.

Along with the dataset, the OECD provides documentation on the methodology used in obtaining the data and a description of the limitations of that methodology. The metadata also contains a detailed explanation of the spread of data over several files, the encoded variables, the domain of each variable and the reasons for missing values in specific variables.

The OECD makes the data available for use and consultation by the public under the license \textbf{CC BY-NC-SA 3.0 IGO}. This license allows us to share and modify the data for non-commercial uses, subject to strict attribution requirements.

A few important definitions required to understand this report are given below.

What is the OECD? The Organisation for Economic Co-operation and Development (OECD) is a group of 34 member countries that discuss and develop economic and social policy. OECD members are democratic countries that support free-market economies.

What is ISCED? The International Standard Classification of Education is a statistical framework for organizing information on education maintained by the United Nations Educational, Scientific and Cultural Organization. It is a member of the international family of economic and social classifications of the United Nations.

What is ISCO?
The International Standard Classification of Occupations is an International Labor Organization classification structure for organizing information on labor and jobs.

\subsection*{Limitations of the dataset}

The Database on Immigrants in OECD Countries (DIOC) 2015/16 provides information on the age, duration of stay, labour force status and occupation of the migrants. The limitations of the dataset are as follows:

\begin{enumerate}
\item Confidentiality and reliability issues

Due to confidentiality issues, the place of birth is recorded at the continental level (Africa, Asia, Europe, etc.) instead of the country level in the dataset; therefore the analysis focuses only on the destination countries. Also, only limited detail is available on important variables like educational qualification and skills.

\item Unavailability of data

The unavailability of data on variables like age, skills, occupation, labour force status and educational qualification proved to be a limitation to the analysis of the dataset.

\item Inconsistency of data

The dataset contains inconsistencies; for example, the reference population for Switzerland, Luxembourg and New Zealand is 15+ and there is no information on the age group 0-14. For this reason, and because the 0-14 age group belongs to the minor category, the age group 0-14 was not considered in the analysis of the gender gap in the migrant population.
\end{enumerate}

\section*{Research Ethics}

The dataset used for analysis in this report has a lot of unknown values due to confidentiality issues and the methods used to ensure reliability and protect confidentiality. The major principles of research ethics have been followed in this report: no labels or absurd statements have been made, keeping in mind the need to obtain informed consent from potential research participants, minimize the risk of harm to participants, protect their anonymity and confidentiality, avoid using deceptive practices and give participants the right to privacy.

\section*{Methodology}

Every migrant weighs up a few factors of the destination country before actually settling permanently or temporarily: which are the best destination countries, what are the job opportunities, what is the unemployment rate, what is the level of education and its recognition worldwide, and many other similar factors. Our report is based on a real dataset from recent years and presents an analysis of the current situation of migration across the globe, and about Australia in particular. Since our dataset is large and contains many files, this report focuses on only four research questions, giving a gist of the state of migration and migrants across the world.

\pagebreak

\section*{Research Questions}

The research questions we have aimed to answer are as follows:

\begin{itemize}
\tightlist
\item
  The top destinations for migration, and the gender difference in migration in total and across different age groups
\item
  Identifying the relationship between unemployment and education level as well as the gender gap in unemployment
\item
  Analyzing the level of education of residents of Australia versus the duration they have been in Australia
\item
  The distribution of various skill levels, types of occupation and the overqualified population across different continents
\end{itemize}

\pagebreak

\section*{Migration and Gender Gap}

Throughout history, humans have migrated for various reasons that could be classified as economic, social, political, etc.
This section focuses on the preferred destination countries for migration as well as the gender difference across age groups in the migrant population of destination countries. Table \ref{tab:topdestinations} depicts the preferred destinations for migration in 2015/16. \textcite{oecd_2019} states that, historically, the USA has been the most popular destination for migration, and this remained the case in 2015-16, with the most preferred destinations being the USA, Japan, and Mexico across all ages.

\begin{table}[H]
\caption{\label{tab:topdestinations}Top 10 destinations}
\centering
\begin{tabular}[t]{lr}
\toprule
Country & Migrant count\\
\midrule
\rowcolor{gray!6}  United States of America & 323127504\\
Japan & 127094759\\
\rowcolor{gray!6}  Mexico & 119561904\\
Germany & 82424843\\
\rowcolor{gray!6}  Tunisia & 78741055\\
\addlinespace
France & 66190412\\
\rowcolor{gray!6}  United Kingdom & 64381731\\
Italy & 60383365\\
\rowcolor{gray!6}  Korea & 48141031\\
Spain & 45977782\\
\bottomrule
\end{tabular}
\end{table}

Among the international migrants, 50.94\% are female and 49.06\% are male. Figure \ref{fig:gendergap} shows the gender gap in each country's migrant population; it can be observed that in 2015-16 women constituted a larger share of the migrant population, especially in the top preferred destinations such as the USA and Japan, whereas the gap is relatively small in countries like Belgium and Finland.

\begin{figure}
\centering
\includegraphics{ETC5513assignment4_files/figure-latex/gendergap-1.pdf}
\caption{\label{fig:gendergap}Gender difference in migrant population in countries}
\end{figure}

In figure \ref{fig:agegap}, we can see that in 2015-16 most migrants fell in the age group 25-64, with females outnumbering males in countries like the USA, Mexico and Germany. In countries like Japan, Korea and Spain there is no gender gap in the migrant population of the 25-64 age group, whereas in countries like Poland and Turkey the number of males is higher than that of females for the same age group. Amongst the 15-24 age group, there is a relatively small gender difference in the migrant population compared to the other age groups, and the trend is uniform in almost all countries. Amongst migrants aged 65 and above, the number of females is higher than that of males in the USA, Japan, Italy and elsewhere, while Mexico, one of the top destinations for migration, saw an influx of an equal number of males and females aged over 65 in 2015-16.

\begin{figure}
\centering
\includegraphics{ETC5513assignment4_files/figure-latex/agegap-1.pdf}
\caption{\label{fig:agegap}Gender difference in migrant population across age groups in countries}
\end{figure}

\pagebreak

\section*{The Education Level of Australian Residents}

In the past few decades, Australia has acquired many skilled migrants, who contribute substantially to the local economy. \textcite{hawthorne2010valuable} states that by 2007, two thirds of them were former international students recruited in Australia rather than offshore applicants. This transition has also been named \emph{``two-step migration''} by \textcite{hawthorne2010valuable}. This section will analyse the education levels of current residents of Australia categorized by their duration of stay in the country.
The dataset provides information on the education levels of Australian residents and their duration of stay within the country.\\
The residents of Australia have been categorized based on their duration of stay within the country as

\begin{itemize}
\tightlist
\item
  native born
\item
  less than 5 years
\item
  between 5 - 10 years
\item
  more than 10 years
\end{itemize}

The level of education has been categorized using a broad three-category classification as

\begin{itemize}
\tightlist
\item
  low
\item
  medium
\item
  high
\end{itemize}

However, the duration of stay of 25.9\% of Australian residents is unknown, and the education level of 24.6\% of residents is unknown. These values will also be used in the analysis as unknowns, to provide a more accurate overview of the overall numbers.

\begin{figure}
\centering
\includegraphics{ETC5513assignment4_files/figure-latex/tile-plot-edu-vs-duration-1.pdf}
\caption{\label{fig:tile-plot-edu-vs-duration}Breakdown of Australian residents based on their level of Education and duration of stay in Australia}
\end{figure}

Figure \ref{fig:tile-plot-edu-vs-duration} shows the breakdown of Australian residents based on their duration of stay in Australia and their level of education; higher counts of residents are represented by light blue and lower counts by dark blue. The dataset contains a few unknowns about the residents, some on the level of education and some on the duration of stay, which are also represented in figure \ref{fig:tile-plot-edu-vs-duration}.

The highest counts of Australian residents are natives, and it is interesting that a majority of them have a medium level of education, followed by natives with a high level of education, and only a few with a low level of education. It is also interesting that Australian residents who have been in the country for over ten years show higher counts than those who have been in the country for a shorter duration. It can also be seen that in all three categories of non-native-born Australians, most have received a high level of education.

\begin{table}[H]
\caption{\label{tab:table-percent-levels-of-education}Percentages of Australian residents with different levels of education over each category of duration of stay}
\centering
\resizebox{\linewidth}{!}{
\begin{tabular}[t]{l|l|l|l|l}
\hline
Duration of stay & High level of education (\%) & Medium level of education (\%) & Low level of education (\%) & Unknown level of education (\%)\\
\hline
\rowcolor{gray!6}  five-to-ten-years & 54\% & 31\% & 9\% & 5\%\\
\hline
five-years-or-less & 50\% & 34\% & 10\% & 6\%\\
\hline
\rowcolor{gray!6}  more-than-ten-years & 37\% & 34\% & 21\% & 8\%\\
\hline
native-born & 29\% & 41\% & 22\% & 8\%\\
\hline
\end{tabular}}
\end{table}

A breakdown of Australian residents with different durations of stay by their level of education is presented in table \ref{tab:table-percent-levels-of-education}. It can be seen that the majority of individuals who have obtained residency in Australia recently have a high level of education, while only a few of them have a low level of education. A significant increase in the percentage of individuals with higher levels of education obtaining residency in Australia can also be seen in table \ref{tab:table-percent-levels-of-education}. Most native Australians have a medium level of education (41\%), while those with shorter durations of stay in Australia have much higher levels of education.
\textcite{green2007immigrant} states that immigration policies in Australia have placed an increased focus on skill-based selection criteria, which has resulted in over-education of recently arrived Australian immigrants. This is consistent with most of the recent immigrants having a high level of education, as shown in figure \ref{fig:tile-plot-edu-vs-duration} and table \ref{tab:table-percent-levels-of-education}.

\pagebreak

\section*{Gender Gap and Education with Unemployment Rate}

\begin{table}[H]
\caption{\label{tab:unnamed-chunk-9}Unemployment rate gender gap}
\centering
\begin{tabular}[t]{l|r|r|r}
\hline
Country & Male unemployment rate (\%) & Female unemployment rate (\%) & Gap (F$-$M)\\
\hline
\rowcolor{gray!6}  AUS & 4.862303 & 4.011510 & -0.8507930\\
\hline
AUT & 5.336735 & 3.935391 & -1.4013437\\
\hline
\rowcolor{gray!6}  BEL & 4.347939 & 3.298562 & -1.0493775\\
\hline
CAN & 5.778106 & 4.327074 & -1.4510321\\
\hline
\rowcolor{gray!6}  CHE & 3.865908 & 3.214473 & -0.6514349\\
\hline
CHL & 4.809590 & 3.957720 & -0.8518701\\
\hline
\rowcolor{gray!6}  CZE & 2.396411 & 2.451591 & 0.0551801\\
\hline
DEU & 2.936210 & 2.046914 & -0.8892954\\
\hline
\rowcolor{gray!6}  DNK & 2.148729 & 2.105992 & -0.0427379\\
\hline
ESP & 10.985712 & 10.894353 & -0.0913595\\
\hline
\end{tabular}
\end{table}

\begin{figure}
\centering
\includegraphics{ETC5513assignment4_files/figure-latex/GAP-1.pdf}
\caption{\label{fig:GAP}Unemployment rate gender gap}
\end{figure}

As shown in figure \ref{fig:GAP}, the difference between female and male unemployment rates is negative in most countries. We can conclude that, generally speaking, a gender gap in the unemployment rate does exist. Surprisingly, the gap is negative in most countries, which means that females have a lower unemployment rate than males in most countries.

\begin{table}[H]
\caption{\label{tab:unnamed-chunk-11}Unemployment rate (as a proportion) by level of education}
\centering
\begin{tabular}[t]{l|r|r|r}
\hline
Country & High & Low & Medium\\
\hline
\rowcolor{gray!6}  AUS & 0.0354576 & 0.0443595 & 0.0532527\\
\hline
AUT & 0.0311428 & 0.0701552 & 0.0457923\\
\hline
\rowcolor{gray!6}  BEL & 0.0298011 & 0.0431251 & 0.0461857\\
\hline
CAN & 0.0452058 & 0.0517304 & 0.0601727\\
\hline
\rowcolor{gray!6}  CHE & 0.0328462 & 0.0416613 & 0.0336930\\
\hline
CHL & 0.0179900 & 0.0427796 & 0.0498058\\
\hline
\rowcolor{gray!6}  CZE & 0.0167868 & 0.0409399 & 0.0226775\\
\hline
DEU & 0.0160581 & 0.0374303 & 0.0236941\\
\hline
\rowcolor{gray!6}  DNK & 0.0231097 & 0.0177326 & 0.0225601\\
\hline
ESP & 0.0880976 & 0.1186934 & 0.1216533\\
\hline
\end{tabular}
\end{table}

\begin{figure}
\centering
\includegraphics{ETC5513assignment4_files/figure-latex/ed-un-1.pdf}
\caption{\label{fig:ed-un}Unemployment rate among different education levels}
\end{figure}

From figure \ref{fig:ed-un} we can see that the situation varies across countries. Most countries have an unemployment rate under 10\%. Generally speaking, groups with a high level of education have a lower unemployment rate than groups with a low level of education, but surprisingly this is not the case in TUR, PRT, GRC and ITA.

Limitation: there are some missing values that can influence the outcome to some extent; in effect, the data used amounts to a smaller sample.

\pagebreak

\section*{Skills, Occupation and Qualification across the Globe}

\begin{figure}
\centering
\includegraphics{ETC5513assignment4_files/figure-latex/skill-1.pdf}
\caption{\label{fig:skill}Types of Skill}
\end{figure}

Figure \ref{fig:skill} shows the distribution of the different \textbf{Skill Levels} across continents.
A skill level is defined as a function of the complexity and range of tasks and duties to be performed in an occupation. Let us understand the different skill levels.

Skill Level 01 typically involves the performance of simple and routine physical or manual tasks; these tasks require physical strength and/or endurance.

Skill Level 02 typically involves the performance of tasks such as operating machinery and electronic equipment; workers at this level should also have the ability to read and write to a certain extent.

Skill Level 03 involves the performance of complex technical and practical tasks that require an extensive body of factual, technical and procedural knowledge in a specialized field, as well as a high level of literacy and numeracy and well-developed interpersonal communication skills.

The graph shows that \textbf{Europe} has the highest number of migrants at all of these skill levels. \textbf{North America} has very few migrants at skill level 01, with most migrants at skill levels 02 and 03. \textbf{South America} has more people with skill level 02, followed by skill levels 03 and 01. \textbf{Oceania} has very few migrants with skill level 01, with most at skill levels 02 and 03. Most of the migrants in \textbf{Asia} have skill level 02 or 03. \textbf{Africa} has very few migrants across all skill levels.

\begin{figure}
\centering
\includegraphics{ETC5513assignment4_files/figure-latex/qual-1.pdf}
\caption{\label{fig:qual}Occupation Types}
\end{figure}

In figure \ref{fig:qual}, overqualified indicates that a worker is more highly educated than the job they are doing requires, given the educational attainment they have received, broken down by continent. Looking at the figure closely, we see that \textbf{North America} has the highest number of overqualified people, followed by \textbf{Europe}, \textbf{South America}, \textbf{Asia}, \textbf{Africa} and \textbf{Oceania}. Not overqualified here means that the migrant has just the right level of educational attainment for their job.
We see that the top three continents with the right qualification amongst the migrants are \textbf{Europe}, \textbf{North America} and \textbf{Asia}.

\begin{table}[H]
\caption{\label{tab:occupa}List of Occupations that people engage in after Migration}
\centering
\resizebox{\linewidth}{!}{
\begin{tabular}[t]{lrrrrrr}
\toprule
Occupations & Africa & Asia & Europe & North America & Oceania & South America\\
\midrule
\rowcolor{gray!6}  Armed Forces Occupations & 16578 & 134184 & 1161864 & 57483 & 78087 & 123354\\
Clerical Support Worker Occupations & 346142 & 3335802 & 20918780 & 807736 & 854191 & 3722436\\
\rowcolor{gray!6}  Craft and Related Trade Occupations & 507452 & 3238802 & 23205091 & 1746178 & 511284 & 8491454\\
Elementary Occupations & 1039390 & 4126580 & 19477937 & 490819 & 63336 & 10020788\\
\rowcolor{gray!6}  Managerial Occupations & 295844 & 1351722 & 12933257 & 1541335 & 1050874 & 1736952\\
\addlinespace
Plant and Machine Operators and Assembler Occupations & 402639 & 3854407 & 14180541 & 741683 & 698375 & 5953850\\
\rowcolor{gray!6}  Professional Occupations & 820935 & 7401650 & 37449078 & 3015180 & 1699599 & 5056319\\
Service and Sales Worker Occupations & 817080 & 4945890 & 31421967 & 2355201 & 1124498 & 11610410\\
\rowcolor{gray!6}  Skilled Agricultural, Forestry and Fishery Occupations & 187722 & 1537852 & 8223032 & 73344 & 784091 & 4668935\\
Technicians and Associate Professional Occupations & 554736 & 5544054 & 34331795 & 2866619 & 1120886 & 3800846\\
\bottomrule
\end{tabular}}
\end{table}

Table \ref{tab:occupa} lists the different occupation groups as per the International Standard Classification of Occupations \parencite{ilo}. The table has 10 rows and 7 columns. Looking more closely at the table, we see that a large number of migrants engage in Elementary Occupations across the globe, and the numbers are high throughout all regions for this group. Professional Occupations show a different pattern, and we notice that Europe has a higher number of migrants engaged in professional and skilled jobs compared to the other continents. Armed Forces Occupations show the smallest number of migrants. On the whole, we observe the counts of migrants engaged in the different classified occupations.

\pagebreak

\section*{Conclusion}

Migrants have made significant impacts on the socio-economic as well as the political sphere of destination countries. From the analysis of the Database on Immigrants in OECD Countries (DIOC) released by the Organisation for Economic Co-operation and Development (OECD), and by looking at migration in different age groups, we have deduced that people aged 25-64 migrate more than other age groups, and that amongst them females migrate more than males. We have also looked at the top 10 destination countries for migration across the globe. Furthermore, we have examined the different levels of educational attainment in Australia with respect to duration of stay, and derived from our analysis that the migrating population has higher educational attainment than native Australians. In addition, we have looked at the gender gap in the unemployment rate and found that women have a lower unemployment rate than men, and also observed that in most cases the level of educational attainment is directly related to employment opportunities.
Lastly, we were introduced to the different skill and qualification levels across the globe, where most migrants are engaged in skill levels 02 and 03, and we see that most people across the globe are appropriately qualified for their employment.

Furthermore, we have understood the importance of a clean, reproducible workflow and of working collaboratively as a team. Working together has enhanced our knowledge of various GitHub tools, R Markdown and RStudio. On the whole, the team members have helped each other foster creativity and a great exchange of knowledge throughout the project.

\pagebreak

\nocite{*}

\printbibliography

\end{document}
{ "alphanum_fraction": 0.7757114848, "avg_line_length": 49.7958579882, "ext": "tex", "hexsha": "671882e0b30c3a29ef91158ca5a9dd845e7a1061", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "5015ada9dede19e2ac3c2123bf0d5d38b2e60872", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "priya51096/ETC5513-Assignment-4-", "max_forks_repo_path": "ETC5513assignment4.tex", "max_issues_count": 6, "max_issues_repo_head_hexsha": "5015ada9dede19e2ac3c2123bf0d5d38b2e60872", "max_issues_repo_issues_event_max_datetime": "2020-06-12T06:03:03.000Z", "max_issues_repo_issues_event_min_datetime": "2020-06-09T13:57:50.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "priya51096/ETC5513-Assignment-4-", "max_issues_repo_path": "ETC5513assignment4.tex", "max_line_length": 1477, "max_stars_count": null, "max_stars_repo_head_hexsha": "5015ada9dede19e2ac3c2123bf0d5d38b2e60872", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "priya51096/ETC5513-Assignment-4-", "max_stars_repo_path": "ETC5513assignment4.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 9141, "size": 33662 }
\section{Episode 53: Vaulted Feelings} \medskip \DndDropCapLine{R}ecap - left the tavern, took a long time to get to the mine. Got to the mine. Went down into the depths - met some delvers - tried to recruit them. No dice. There’s 3 settlements/shitholes/townships/smallerthanthat places down here - tent towns. Went and picked up a miner (nopaedo). Met an old lady that made them tea. Tried to get her to rebel against the upper world. Riphards uncle and Lilith are both down here. Went to a camp, talked to a blacksmith - told them where L and RU (Targon Hardthrust) had been. Old hardthrusts lived under the earth, found a vault-type-thing. Targon had an arrow in his belly.\medskip We’re in a mine! How’d we get here?! Oh that’s right. Gotta stop day-dreaming. There’s a smell of burned flesh… Ah - kolo gave the Dwarf-man (Targon) some field medicine. Learning from the best. We chingwag with the old Hardstone, (or was it hardthrust?) and put him on Stanri - This is a Hardstonethrust vault of some description. Stanri becomes a bridge over some dodgey chalksidian (obsidian disguised to look like chalk - Riphard). ~ so that we don’t fall into the bowely below. Kolo and Myron walk into an obviously trapped room. See no traps. Open a trapped chest. Get locked into the room and gassed. Myron beasts it, Kolo almost dies. Riphard farts on Kolo to hope it works. Exme shocks the Kolo. He gets better.\medskip We find a gap. I fail to throw the bear across it, old age must be getting to me. Myron walks on air with his fancy shoes, and holds a rope. Kolo climbs the rope across, but Myron fails and we almost lose Kolo, save for Bearvine intervention. We leave Myron to his fate, trapped on the island, and me and Exme and Riphard go to a room with dwarvern characters - which I can read! Ma and Pa always said that dwarvish was useful, I figured it was just one of those national pride things, and kinda resented them for making me learn it. Guess it’s not so dumb now!\medskip There’s a mural with sentences, and runes on the floor. One of those “don’t step on the cracks” things - and also all the wrong runes as there’s poison darts everywhere. I go looking for a chest Myron stole earlier to create a shield and brute force the puzzle, until Exme points out that there’s spelling errors in all the names. Lapin, Ametryst, Paradot, Oryx, Garnot. Derp. We step the pattern of “Shine”.(because there wasn’t an E near after the H. Kolo pulls the lever. And then runs through the dart trap… Shoddy dwarven constructions - should have switched off the countermeasure once the puzzle was solved!\medskip We get to a hole. Myron shouts down it. A tentacle completely fails to grab him. Welp, that happened. We come to an area with slippery emerald. Wait, glass with a weird liquid. Super slippery, down into the spikes we go! Stanri breaks our falls! We go to a chamber with a statue. The statue comes alive. It slows the balls out of me and Myron - Exme blows the fuck out of it - I’m dodging in place. Exme uses the die to buff all our stuff. It wasn’t needed. Riphard kills it.\medskip We go into a chamber and find gold and weapons. Taragon is racist. Kolo kills him and starts eating his liver. I shock him - He’s gone feral! I drag Targon’s corpse away to loot and dispose of - why does every culture canibalise their dead? I get a good whip+1/+1 - Really nice hand crossbow (+2/+2) and a fancy cloak. Then me and kolo dump his body into the tentacle well. I pretend to eat a chunk, smearing blood on my chin, Dwarf cannibal tradition. 
Loads of cash, like 7k, 3k of that in gems, and a bunch of axes and things. Couple nice crossbows +2/+2, and about 3 waraxes +1+1, with some less good stuff.\medskip

We go back to the last gasp camp and confuse the blacksmith, and the guy with a dodgey leg. We sleep, some people cry for Kolo, no one cries for Rip's uncle. It's an uneasy rest. We go in search of Lilith in the morning. We eventually get to a mining camp, and get ambushed by mongoloids. Wait, no, troglodytes. We kill them quickly. Stanri is a train. We come across a pair of dicks, gas-breathing Gorgons. They breathe hot gas at us. Kolo starts getting stoned. Kolo is no longer stoned. We focus the right-side one. Myron, me and Rip start turning to stone - I shrug it off.\medskip

I slice the first (already weakened) one to bits, Stanri demolishes the second. We've got two stoned companions. Lilith is a barrel. She tells us there's an under-mine that might have the excallibrum. But we've got two stone-shaped companions to try and help out.\medskip

\vspace*{5mm}
\begin{center}
\includegraphics[width=\textwidth]{./content/img/xxx.jpg}
\begin{figure}[h]
\end{figure}
\end{center}

\clearpage
{ "alphanum_fraction": 0.7665394402, "avg_line_length": 152.1290322581, "ext": "tex", "hexsha": "33f5e21cd58908cb53edf5dca2efb07fdd8212fb", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2019-10-04T09:40:24.000Z", "max_forks_repo_forks_event_min_datetime": "2019-10-04T09:40:24.000Z", "max_forks_repo_head_hexsha": "23763424cf31c50618bc6ddeefe2196cdf6be974", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "mcgi5sr2/velterraBook", "max_forks_repo_path": "content/eps/53.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "23763424cf31c50618bc6ddeefe2196cdf6be974", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "mcgi5sr2/velterraBook", "max_issues_repo_path": "content/eps/53.tex", "max_line_length": 728, "max_stars_count": null, "max_stars_repo_head_hexsha": "23763424cf31c50618bc6ddeefe2196cdf6be974", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "mcgi5sr2/velterraBook", "max_stars_repo_path": "content/eps/53.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1253, "size": 4716 }
\documentclass[12pt]{article} \usepackage{url} \title{Ten Secrets to Giving a Good Scientific Talk} \author{You} \begin{document} \maketitle \section{Introduction} The text for this exercise is a significantly abridged, and slightly modified, version of the excellent article of the same name by Mark Schoeberl and Brian Toon: \url{http://www.cgd.ucar.edu/cms/agu/scientific_talk.html} \section{The Secrets} I have compiled this personal list of ``Secrets'' from listening to effective and ineffective speakers. I don't pretend that this list is comprehensive --- I am sure there are things I have left out. But, my list probably covers about 90\% of what you need to know and do. \begin{enumerate} \item Prepare your material carefully and logically. Tell a story. \item Practice your talk. There is no excuse for lack of preparation. \item Don't put in too much material. Good speakers will have one or two central points and stick to that material. \item Avoid equations. It is said that for every equation in your talk, the number of people who will understand it will be halved. That is, if we let $q$ be the number of equations in your talk and $n$ be the number of people who understand your talk, it holds that \begin{equation} n = \gamma \left( \frac{1}{2} \right)^q \end{equation} where $\gamma$ is a constant of proportionality. \item Have only a few conclusion points. People can't remember more than a couple things from a talk especially if they are hearing many talks at large meetings. \item Talk to the audience not to the screen. One of the most common problems I see is that the speaker will speak to the viewgraph screen. \item Avoid making distracting sounds. Try to avoid ``Ummm'' or ``Ahhh'' between sentences. \item Polish your graphics. Here is a list of hints for better graphics: \begin{itemize} \item Use large letters. \item Keep the graphics simple. Don't show graphs you won't need. \item Use color. \end{itemize} \item Be personable in taking questions. \item Use humor if possible. I am always amazed how even a really lame joke will get a good laugh in a science talk. \end{enumerate} \end{document}
{ "alphanum_fraction": 0.765060241, "avg_line_length": 37.2068965517, "ext": "tex", "hexsha": "c663c3a8811dd2e1d203c16c504d374ce001f4cd", "lang": "TeX", "max_forks_count": 85, "max_forks_repo_forks_event_max_datetime": "2022-03-30T13:21:09.000Z", "max_forks_repo_forks_event_min_datetime": "2015-02-26T17:58:25.000Z", "max_forks_repo_head_hexsha": "1098682ae967a67b8da9edde5acdb44edda42107", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "rjp0i/latex-intro", "max_forks_repo_path": "en/recap-exercise-solution.tex", "max_issues_count": 2, "max_issues_repo_head_hexsha": "1098682ae967a67b8da9edde5acdb44edda42107", "max_issues_repo_issues_event_max_datetime": "2016-10-05T02:07:23.000Z", "max_issues_repo_issues_event_min_datetime": "2016-10-02T04:49:46.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "rjp0i/latex-intro", "max_issues_repo_path": "en/recap-exercise-solution.tex", "max_line_length": 272, "max_stars_count": 165, "max_stars_repo_head_hexsha": "1098682ae967a67b8da9edde5acdb44edda42107", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "rjp0i/latex-intro", "max_stars_repo_path": "en/recap-exercise-solution.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-28T14:40:35.000Z", "max_stars_repo_stars_event_min_datetime": "2015-02-09T08:07:12.000Z", "num_tokens": 519, "size": 2158 }
\documentclass{article} \usepackage[utf8]{inputenc} \usepackage{tikz} %\usetikzlibrary{external} %\tikzexternalize % activate! \usetikzlibrary{shapes,arrows} \usetikzlibrary{calc,positioning,shapes.geometric} \begin{document} \title{Diagrams for update assessment project plan} \author{Colin Millar and Arni Magnusson} \maketitle % ------------------------------------------------------------------------------ % ------------------------------------------------------------------------------ % preamble code / definitions % Define block styles \tikzstyle{object}=[draw, fill=blue!20, text width=5em, text centered, minimum height=2.5em] \tikzstyle{process} = [object, text width=10em, fill=red!20, minimum height=4em, rounded corners] \tikzstyle{db} = [object,cylinder,aspect=0.5,shape border rotate=90, fill=red!20] % Define layers \pgfdeclarelayer{background} \pgfsetlayers{background,main} % ------------------------------------------------------------------------------ % ------------------------------------------------------------------------------ \section{Shapes} \subsection{database} \begin{tikzpicture} \node[db] (datras) {DATRAS}; \end{tikzpicture} \subsection{object} \begin{tikzpicture} \node[object] (data) {Data}; \end{tikzpicture} \subsection{process} \begin{tikzpicture} \node[process] (model) {Model fitting}; \end{tikzpicture} % ------------------------------------------------------------------------------ % ------------------------------------------------------------------------------ \section{Structures} \subsection{Data flow to create input database} % Define distances for bordering \def\blockdist{3.2} \def\edgedist{0.5} \begin{tikzpicture} % central node \node (db_input) [db] {Input \\database \\(staging)}; % input nodes \path (db_input.west)+(-\blockdist,2.5) node (datras) [db] {DATRAS}; \path (db_input.west)+(-\blockdist,0.5) node (rdb)[db] {RDB}; \path (db_input.west)+(-\blockdist,-3.0) node (coordinator)[object] {Stock Coordinator}; % lines \path [draw, ->] (datras.east) -- node [above, sloped] {webservice} (db_input.160) ; \path [draw, ->] (rdb.east) -- node [above, sloped] {webservice} (db_input.180); \path [draw, ->] (coordinator.east) -- node [above, sloped, pos=0.35] {validation} (db_input.200); % output node \path (db_input.east)+(\blockdist,0) node (assessment) [object] {Input data}; % lines \path [draw, ->] (db_input.east) -- node [above] {webservice} (assessment.west); % box title \path (datras.north -| db_input.east) + (0,-0.2) node (sql06)[left] {SQL06}; % background \begin{pgfonlayer}{background} % define corners of rectangle \path (datras.north west)+(-\edgedist,\edgedist) node (a) {}; \path (db_input.south east)+(\edgedist,-\edgedist) node (b) {}; % draw background \path[fill=yellow!20,rounded corners, draw=black!50, dashed] (a) rectangle (b); \end{pgfonlayer} \end{tikzpicture} \subsection{Update assessment process} \begin{tikzpicture}[node distance = \blockdist cm, auto] % Place nodes \node [process] (modelfitting) {Model fitting}; \node [object, above left of=modelfitting] (assessment) {Input data}; \node [object, above right of=modelfitting] (model) {Model}; \node [object, below of=modelfitting] (output) {Output}; % Draw edges \path [draw, ->] (assessment) -- (modelfitting); \path [draw, ->] (model) -- (modelfitting); \path [draw, ->] (modelfitting) -- (output); \end{tikzpicture} \subsection{Data flow to create input database} % Define distances for bordering \def\blockdist{3.2} \def\edgedist{0.5} \begin{tikzpicture} % central node \node (db_input) [db] {Input 
\\database \\(staging)}; % input nodes \path (db_input.west)+(-\blockdist,2.5) node (datras) [db] {DATRAS}; \path (db_input.west)+(-\blockdist,0.5) node (rdb)[db] {RDB}; \path (db_input.west)+(-\blockdist,-3.0) node (coordinator)[object] {Stock Coordinator}; % lines \path [draw, ->] (datras.east) -- node [above, sloped] {webservice} (db_input.160) ; \path [draw, ->] (rdb.east) -- node [above, sloped] {webservice} (db_input.180); \path [draw, ->] (coordinator.east) -- node [above, sloped, pos=0.35] {validation} (db_input.200); % output node \path (db_input.east)+(\blockdist,0) node (assessment) [object] {Input data}; % lines \path [draw, ->] (db_input.east) -- node [above] {webservice} (assessment.west); % box title \path (datras.north -| db_input.east) + (0,-0.2) node (sql06)[left] {SQL06}; % background \begin{pgfonlayer}{background} % define corners of rectangle \path (datras.north west)+(-\edgedist,\edgedist) node (a) {}; \path (db_input.south east)+(\edgedist,-\edgedist) node (b) {}; % draw background \path[fill=yellow!20,rounded corners, draw=black!50, dashed] (a) rectangle (b); \end{pgfonlayer} \end{tikzpicture} \subsection{Combined process} \begin{tikzpicture}[node distance = \blockdist cm, auto] % Place nodes \node [process] (modelfitting) {Model fitting}; \node [object, above left of=modelfitting] (assessment) {Input data}; \node [object, above right of=modelfitting] (model) {Model}; \node [object, below of=modelfitting] (output) {Output}; % Draw edges \path [draw, ->] (assessment) -- (modelfitting); \path [draw, ->] (model) -- (modelfitting); \path [draw, ->] (modelfitting) -- (output); % box title \path (output.south -| model.east) + (0.4,-0.2) node (lamp)[left] {ArniColin1}; % background \begin{pgfonlayer}{background} % define corners of rectangle \path (assessment.north west)+(-\edgedist,\edgedist) node (a) {}; \path (output.south -| model.east)+(\edgedist,-\edgedist) node (b) {}; % draw background \path[fill=yellow!20,rounded corners, draw=black!50, dashed] (a) rectangle (b); \end{pgfonlayer} %% now for the input database links % central node \path (assessment.west)+(-\blockdist,0) node (db_input) [db] {Input \\database \\(staging)}; % output node %\path (db_input.east)+(\blockdist,0) node (assessment) [object] {Input data}; % lines \path [draw, ->] (db_input.east) -- node [above] {webservice} (assessment.west); % input nodes \path (db_input.west)+(-\blockdist,2.5) node (datras) [db] {DATRAS}; \path (db_input.west)+(-\blockdist,0.5) node (rdb)[db] {RDB}; \path (db_input.west)+(-\blockdist,-3.0) node (coordinator)[object] {Stock Coordinator}; % lines \path [draw, ->] (datras.east) -- node [above, sloped] {webservice} (db_input.160) ; \path [draw, ->] (rdb.east) -- node [above, sloped] {webservice} (db_input.180); \path [draw, ->] (coordinator.east) -- node [above, sloped, pos=0.35] {validation} (db_input.200); % box title \path (datras.north -| db_input.east) + (0,-0.2) node (sql06)[left] {SQL06}; % background \begin{pgfonlayer}{background} % define corners of rectangle \path (datras.north west)+(-\edgedist,\edgedist) node (a) {}; \path (db_input.south east)+(\edgedist,-\edgedist) node (b) {}; % draw background \path[fill=yellow!20,rounded corners, draw=black!50, dashed] (a) rectangle (b); \end{pgfonlayer} % join output \path [draw, bend left, ->] (output) to node [auto] {webservice?} (db_input); \end{tikzpicture} \end{document} %\begin{tikzpicture} % \node (wa) [process] {System Combination}; % \path (wa.west)+(-3.2,1.5) node (asr1) [object] {$ASR_1$}; % \path 
(wa.west)+(-3.2,0.5) node (asr2)[object] {$ASR_2$}; % \path (wa.west)+(-3.2,-1.0) node (dots)[ann] {$\vdots$}; % \path (wa.west)+(-3.2,-2.0) node (asr3)[object] {$ASR_N$}; % \path (wa.east)+(\blockdist,0) node (vote) [object] {$\theta_0,\theta_1,...,\theta_M$\\Estimated Parameters}; % % \path [draw, ->] (asr1.east) -- node [above] {} % (wa.160) ; % \path [draw, ->] (asr2.east) -- node [above] {} % (wa.180); % \path [draw, ->] (asr3.east) -- node [above] {} % (wa.200); % \path [draw, ->] (wa.east) -- node [above] {} % (vote.west); % % % \path (wa.south) +(0,-\blockdist) node (asrs) {System Combination - Training}; % % \begin{pgfonlayer}{background} % \path (asr1.west |- asr1.north)+(-0.5,0.3) node (a) {}; % \path (wa.south -| wa.east)+(+0.5,-0.3) node (b) {}; % \path (vote.east |- asrs.east)+(+0.5,-0.5) node (c) {}; % % \path[fill=yellow!20,rounded corners, draw=black!50, dashed] % (a) rectangle (c); % \path (asr1.north west)+(-0.2,0.2) node (a) {}; % % \end{pgfonlayer} % % % Validation Layer is the same except that there are a set of nodes and links which are added % % % \path (wa.south)+(-2.0,-7.5) node (syscomb) [sc] {\textbf{System Combination \\Algorithm}\\Estimated Parameters\\from training}; % \path (syscomb.west)+(-2.2,1.5) node (asrt1) [object] {$ASR_1$}; % \path (syscomb.west)+(-2.2,0.5) node (asrt2)[object] {$ASR_2$}; % \path (syscomb.west)+(-2.2,-1.0) node (dots)[ann] {$\vdots$}; % \path (syscomb.west)+(-2.2,-2.0) node (asrt3)[object] {$ASR_N$}; % % \path [draw, ->] (asrt1.east) -- node [above] {} % (syscomb.160) ; % \path [draw, ->] (asrt2.east) -- node [above] {} % (syscomb.180); % \path [draw, ->] (asrt3.east) -- node [above] {} % (syscomb.200); % % % \path (wa.south) +(0,-\blockdist) node (sct) {System Combination - Training}; % % % \path (syscomb.east)+(1.0,0.0) node (bwtn) {}; % % % Note how the single nodes are repeated using for loop % \foreach \x in {0,1,...,4} % { % \draw (bwtn.east)+(\x,0) node (asr\x-2)[]{}; % \fill (bwtn.east)+(\x,0) circle (0.1cm); % } % % \path [draw, ->] (syscomb.east) -- node [above] {} % (bwtn.east); % \path [draw, ->] (asr0-2) -- node [above] {@} % (asr1-2); % \path [draw, -] (asr1-2) -- node [above] {b} % (asr2-2); % \path [draw, -] (asr2-2) -- node [above] {z} % (asr3-2); % \path [draw, -] (asr3-2) -- node [above] {} % (asr4-2); % % \path [draw, ->] (asr0-2) edge[bend right] node [below] {@} % (asr1-2); % \path [draw, ->] (asr1-2) edge[bend right] node [below] {b} % (asr2-2); % \path [draw, ->] (asr2-2) edge[bend right] node [below] {c} % (asr3-2); % \path [draw, ->] (asr4-2) node[]{} (asr4-2)+(1.0,0); % % \begin{scope}[looseness=1.6] % \path [draw, ->] (asr0-2) edge[bend right=90] node [below] {a} % (asr1-2); % \path [draw, ->] (asr1-2) edge[bend right=90] node [below] {b} % (asr2-2); % \path [draw, ->] (asr2-2) edge[bend right=90] node [below] {c} % (asr3-2); % \end{scope} % \path (asr3-2.east)+(1.5,0.0) node (bw)[object] {Best Word Sequence\\$\arg\max$}; % % \path [draw, -] (asr1-2.east) node [below] {} % (bw.west); % % \begin{pgfonlayer}{background} % \path (asrt1.west)+(-0.5,1.0) node (g) {}; % \path (bw.east |- syscomb.south)+(0.5,-1.5) node (h) {}; % % \path[fill=yellow!20,rounded corners, draw=black!50, dashed] % (g) rectangle (h); % % \path [draw, ->] (vote.south) edge[bend left=90] node [below] {Used in validation} % (syscomb.30); % % \end{pgfonlayer} % % \path (asr1-2.south) +(-\blockdist,-\blockdist) % node (asrs) {System Combination - Validation}; % % \end{tikzpicture}
{ "alphanum_fraction": 0.5904583222, "avg_line_length": 30.75, "ext": "tex", "hexsha": "0910ddeb692b4b85036736f2dc67ee584d50c7dd", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "e3104efff6a200c37e555f98e0b32b3b6d87dd4d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "colinpmillar/ps_ragnarok", "max_forks_repo_path": "docs/flowchart/flowchart.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "e3104efff6a200c37e555f98e0b32b3b6d87dd4d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "colinpmillar/ps_ragnarok", "max_issues_repo_path": "docs/flowchart/flowchart.tex", "max_line_length": 134, "max_stars_count": null, "max_stars_repo_head_hexsha": "e3104efff6a200c37e555f98e0b32b3b6d87dd4d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "colinpmillar/ps_ragnarok", "max_stars_repo_path": "docs/flowchart/flowchart.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 3895, "size": 11193 }
\section{The \tlaplus\ Proof Language} \begin{frame} \frametitle{Assertions} \begin{itemize} \item \tc{dkblue}{Assertions state valid facts} \oo \tc{dkblue}{\AXIOM\ and \ASSUME\ assert unproved facts} \begin{itemize} \o \tlaps\ handles \ASSUME\ and \AXIOM\ identically \o \tlc\ checks \ASSUME{}d facts \end{itemize} \oo \tc{dkblue}{\THEOREM\ asserts that a fact is provable in the current context} \begin{itemize} \o the proof need not be given at once \o unproved theorems will be colored yellow in the toolbox \o \LEMMA\ and \PROPOSITION\ are synonyms of \THEOREM \end{itemize} \oo \tc{dkblue}{Facts can be named for future reference} \medskip \begin{tlablock}[.89] \THEOREM\ Fermat\ \deq\ \forall n \in Nat \setminus (0..2): \forall a,b,c \in Nat \setminus \{0\}: a^n + b^n \neq c^n \end{tlablock} \end{itemize} \end{frame} \begin{frame} \frametitle{Shape of Assertions} \begin{itemize} \item \tc{dkblue}{A \tlaplus\ assertion can be a formula or a logical sequent} \medskip \qquad\begin{tlablock}[.7] \qquad $F$ \qquad\qquad\text{or}\qquad\qquad \begin{array}{@{}l@{\ \ }l} \ASSUME & A_1, \ldots, A_n\\ \PROVE & F \end{array} \end{tlablock} \oo \tc{dkblue}{Shape of a sequent \ASSUME\ \ldots\ \PROVE} \begin{itemize} \o the conclusion $F$ is always a formula \o the assumptions $A_i$ can be \medskip \begin{tabular}{ll} declarations & \tc{dkgreen}{$\NEW\ msg \in Msgs$}\\ & (levels: \CONSTANT, \STATE, \ACTION, \TEMPORAL)\\[2mm] formulas & \tc{dkgreen}{$msg.type = \str{alert}$}\\[2mm] sequents & \tc{dkgreen}{\(\begin{array}[t]{@{}l@{\ \ }l} \ASSUME & \NEW\ msg \in Msgs,\ msg.type = \str{alert}\\ \PROVE & msg \in Alarm \end{array}\)} \end{tabular} \end{itemize} \end{itemize} \end{frame} \begin{frame} \frametitle{Nested \ASSUME\ \ldots\ \PROVE} \begin{itemize} \item \tc{dkblue}{Useful for writing proof rules} \bigskip \begin{tlablock} \THEOREM\ ForallIntro\ \deq \begin{array}[t]{l@{\ \ }l} \ASSUME & \NEW\ P(\_),\\ & \ASSUME\ \ \NEW\ y\ \ \PROVE\ \ P(y)\\ \PROVE & \A x : P(x) \end{array} \end{tlablock} \bigskip \item \tc{dkblue}{Nested \ASSUME\ \ldots\ \PROVE\ encodes freshness of $y$} \end{itemize} \end{frame} \begin{frame} \frametitle{Proof Rules in \tlaplus} \centerline{\begin{tlablock} \THEOREM\ RuleINV1\ \deq\ \begin{array}[t]{@{}l@{\ \ }l} \ASSUME & \STATE\ I,\ \STATE\ v,\ \ACTION\ N,\\ & I \land [N]_v \implies I'\\ \PROVE & I \land \Box[N]_v \implies \Box I \end{array} \end{tlablock}} \begin{itemize} \oo \tc{dkblue}{Validity of conclusion follows from validity of hypotheses} \begin{itemize} \o given a substitution of the declared identifiers by expressions of the declared or lower level \o if all hypotheses are provable in the current context then the instance of the conclusion may be concluded \end{itemize} \pause \oo \tc{dkblue}{Constant-level rules may be instantiated at any level} \smallskip \centerline{\begin{tlablock} \THEOREM\ Substitutivity\ \deq\ \begin{array}[t]{@{}l@{\ \ }l} \ASSUME & \NEW\ x,\ \NEW\ y,\ \NEW\ P(\_),\\ & x=y\\ \PROVE & P(x) \biimplies P(y) \end{array} \end{tlablock}} \begin{itemize} \o expression instantiating $P(\_)$ must satisfy Leibniz condition \end{itemize} \end{itemize} \end{frame} \begin{frame} \frametitle{Structure of \tlaplus\ Proofs} \begin{itemize} \item \tc{dkblue}{Proofs are either leaf proofs \ldots} \smallskip \qquad\begin{tlablock} \LEMMA\ \ Init \implies InductiveInvariant\\ \BY\ Positive\ \DEFS\ Init, InductiveInvariant \end{tlablock} \pause \oo \tc{dkblue}{\ldots\ or sequences of assertions followed by \QED} \smallskip \qquad\begin{tlablock} 
\begin{array}{@{}l@{\ \ }l@{\ }l} \ps{1}{a.} & \CASE & x<y\\ \ps{1}{b.} & \CASE & x>y\\ \ps{1}{q.} & \QED & \BY\ \ps{1}{a}, \ps{1}{b} \end{array} \end{tlablock} \begin{itemize} \o \makebox[8.5cm][l]{every step of a proof has the same \alert{level number}} \tc{dkgreen}{$\ps{1}{}$} \o \makebox[8.5cm][l]{and may be named for future reference} $\ps{1}{\tc{dkgreen}{a.}}$ \o \QED\ step: the assertion follows from the preceding facts \o each step recursively has a proof\ \ $\leadsto$\ \ proof tree \o proof step with higher level number starts subproof \end{itemize} \oo \tc{dkblue}{Proofs are best developed per level (check only \QED\ step)} \end{itemize} \end{frame} \begin{frame} \frametitle{Leaf Proofs} \begin{itemize} \item \tc{dkblue}{Elementary steps: assertion follows by ``simple reasoning''} \medskip \begin{tlablock} \BY\ e_1, \ldots, e_m\ \DEFS\ d_1, \ldots, d_n \end{tlablock} \begin{itemize} \o \tc{dkgreen}{$e_1, \ldots, e_m$} : known facts (assumptions, theorems, previous steps) \o formulas implied by known facts may also appear among $e_i$ \o \tc{dkgreen}{$d_1, \ldots, d_n$} : operator names whose definitions should be expanded \o citation of facts and definitions limits size of proof obligations \o \tc{dkgreen}{\OBVIOUS} : assertion follows without use of extra facts \end{itemize} \pause \oo \tc{dkblue}{Checking leaf proofs in \tlaps} \begin{itemize} \o verify that $e_1, \ldots, e_m$ are provable in current context \o expand the definitions of $d_1, \ldots, d_n$ \o pass obligation to a prover (default: Zenon, then Isabelle) \o some ``facts'' specify a prover backend, e.g. $SimpleArithmetic$ \end{itemize} \oo \alert{\tlaps\ is independent of axiomatic systems and theorem provers} \end{itemize} \end{frame} \begin{frame} \frametitle{Known and Usable Facts and Definitions} \begin{itemize} \item \tc{dkblue}{Scoping and context} \begin{itemize} \o obvious scope rules determine current context \o context contains known declarations, facts, and definitions \o assertions state that a fact is provable in the current context \end{itemize} \oo \tc{dkblue}{Usable facts and definitions} \begin{itemize} \o \alert{usable facts/definitions} : passed to backend provers \o facts and definitions must normally be cited explicitly in \BY \o \tc{dkgreen}{$\USE\ e_1,\ldots,e_m\ \DEFS\ d_1,\ldots,d_n$}\ \ makes facts usable within scope \o domain facts\ \ \tc{dkgreen}{$x \in S$}\ \ are usable by default \o facts stated in unnamed steps are usable by default \o definitions introduced within a proof are usable by default \o definitions of theorem names are usable by default \o \tc{dkgreen}{$\HIDE\ e_1,\ldots,e_m\ \DEFS\ d_1,\ldots,d_n$}\ \ is the opposite of $\USE$ \end{itemize} \end{itemize} \end{frame} \begin{frame} \frametitle{Proof Steps: Assertions} \qquad\begin{tlablock} \ps{4}{3.}\ \ \begin{noj2} \ASSUME & \NEW\ x \in S,\ x > y,\ P(y)\\ \PROVE & \E w \in S: x\ |\ w+y \end{noj2} \end{tlablock} \begin{itemize} \oo \tc{dkblue}{Assertions in a proof are analogous to \THEOREM\ statements} \begin{itemize} \o assumptions are added to known facts \o formula after \PROVE\ becomes current goal \o \ASSUME{}d facts are automatically used if the step has a leaf proof \end{itemize} \oo \tc{dkblue}{References to proof steps} \medskip \begin{tlablock} \ps{4}{3.}\ \ \begin{noj2} \ASSUME & \NEW\ x \in S,\ x > y,\ P(y)\\ \PROVE & \E w \in S: x\ |\ w+y \end{noj2}\\ \quad\ps{5}{1.}\ \ Q(x,y)\\ \quad\quad\BY\ \alert{\ps{4}{3}}\hspace*{1.9cm} \only<2>{\raisebox{0cm}[0pt][0pt]{\begin{minipage}{6cm} 
\begin{beamercolorbox}[rounded=true,shadow=true]{postit}\footnotesize within proof, denotes assumptions of $\ps{4}{3}$ \end{beamercolorbox} \end{minipage}}}\\ \quad\ps{5}{2.}\ \ \QED\\ \quad\quad\BY\ \ps{5}{2}\ \DEFS\ P,Q\\ \ps{4}{4.}\ \ \E w \in S: u\ |\ w+y\\ \quad\BY\ u \in S, \ps{3}{5}, \alert{\ps{4}{3}}\hspace*{.6cm} \only<2>{\raisebox{0cm}[0pt][0pt]{\begin{minipage}{6cm} \begin{beamercolorbox}[rounded=true,shadow=true]{postit}\footnotesize outside proof, denotes entire sequent $\ps{4}{3}$ \end{beamercolorbox} \end{minipage}}}\\ \end{tlablock} \end{itemize} \end{frame} \begin{frame} \frametitle{Proof Steps: \CASE} \qquad\begin{tlablock} \ps{3}{4.}\ \CASE\ x<0\\ \ps{3}{5.}\ \CASE\ x=0\\ \ps{3}{6.}\ \CASE\ x>0\\ \ps{3}{7.}\ \QED\\ \quad\BY\ \ps{3}{4},\ \ps{3}{5},\ \ps{3}{6},\ x \in Real \end{tlablock} \begin{itemize} \oo \tc{dkblue}{Prove current goal under additional hypothesis} \begin{itemize} \o current goal remains unchanged \o \CASE\ assumption is added to the known facts \o references to \CASE\ step within the proof refer to assumption \o equivalent to\quad\tlabox{\ps{3}{4.}\ \ASSUME\ x<0\ \PROVE\ G}\hfill {\footnotesize ($G$: current goal)} \end{itemize} \oo \tc{dkblue}{Later, must show that the case distinction is exhaustive} \end{itemize} \end{frame} \begin{frame} \frametitle{Proof Steps: \SUFFICES} \qquad\begin{tlablock} \ps{2}{6.}\ \A x \in S: P(x) \implies Q(x,y)\\ \quad\ps{3}{1.}\ \SUFFICES \begin{array}[t]{l@{\ \ }l} \ASSUME & \NEW\ x \in S,\ P(x),\ \lnot Q(x,y)\\ \PROVE & Q(x,y) \end{array}\\ \quad\quad\OBVIOUS \end{tlablock} \begin{itemize} \oo \tc{dkblue}{\tlaplus\ proofs are normally written in ``forward style''} \oo \tc{dkblue}{\SUFFICES\ steps introduce backward chaining} \begin{itemize} \o reduce current goal to assertion claimed after \SUFFICES \o proof shows that new assertion implies the current goal \o assumption is usable within that proof \o frequently used to restate goal in more perspicuous form \end{itemize} \oo \tc{dkblue}{\SUFFICES\ steps modify the current goal} \begin{itemize} \o conclusion of \SUFFICES\ becomes current goal (proved by \QED) \o references to $\ps{3}{1}$ within remaining scope denote assumptions \end{itemize} \end{itemize} \end{frame} \begin{frame} \frametitle{Proof Steps: \HAVE} \qquad\begin{tlablock} \ps{3}{5.}\ x+y > 0\ \implies\ x > -y\\ \quad\ps{4}{1.}\ \HAVE\ x \in Real \land y \in Real \land x+y > 0 \end{tlablock} \begin{itemize} \oo \tc{dkblue}{Proof of implications} \begin{itemize} \o current goal must be of the form\ \ \tc{dkgreen}{$H \implies G$} \o formula after \HAVE\ must follow easily from $H$ and known facts \o $G$ becomes the current goal \o \HAVE\ steps take no proof \end{itemize} \oo \tc{dkblue}{In this context, $\HAVE\ F$ is a shorthand for} \medskip \begin{tlablock} \SUFFICES \begin{array}[t]{l@{\ \ }l} \ASSUME & F\\ \PROVE & G \end{array}\\ \quad\OBVIOUS \end{tlablock} \end{itemize} \vfill\vfill \end{frame} \begin{frame} \frametitle{Proof Steps: \TAKE} \qquad\begin{tlablock} \ps{3}{7.}\ \A x,y \in S, z \in T : G\\ \quad\ps{4}{1.}\ \TAKE\ x,y \in S, z \in T \end{tlablock} \begin{itemize} \oo \tc{dkblue}{Proof of universally quantified formulas} \begin{itemize} \o current goal must be (trivially equivalent to)\ \ \tc{dkgreen}{$\A \tau: G$} \o $\TAKE\ \tau$\ \ introduces new constant declarations \o $G$ becomes the current goal \o \TAKE\ steps have no proof \end{itemize} \oo \tc{dkblue}{$\TAKE\ x,y \in S, z \in T$\ \ is shorthand for} \medskip \begin{tlablock} \SUFFICES \begin{array}[t]{l@{\ \ }l} \ASSUME & \NEW\ x \in 
S,\ \NEW\ y \in S,\ \NEW\ z \in T\\ \PROVE & G \end{array}\\ \quad\OBVIOUS \end{tlablock} \end{itemize} \end{frame} \begin{frame} \frametitle{Proof Steps: \WITNESS} \qquad\begin{tlablock} \ps{2}{6.}\ \E x \in S,\ y \in T : F(x,y)\\ \quad\ldots\\ \quad\ps{3}{10.}\ \WITNESS\ Maximum(M) \in S,\ Minimum(M) \in T \end{tlablock} \begin{itemize} \oo \tc{dkblue}{Proof of existentially quantified formulas} \begin{itemize} \o current goal must be (trivially equivalent to)\ \ \tc{dkgreen}{$\E \tau: G$} \o \WITNESS\ specifies terms for each quantified variable \o domain facts corresponding to bounded quantifiers easily provable \o corresponding instance of $G$ becomes the current goal \o \WITNESS\ steps take no proof \end{itemize} \oo \tc{dkblue}{The above \WITNESS\ step is shorthand for} \medskip \begin{tlablock} \ps{3}{10.}\ \SUFFICES\ F(Maximum(M), Minimum(M))\\ \quad\ps{4}{1.}\ Maximum(M) \in S\quad\OBVIOUS\\ \quad\ps{4}{2.}\ Minimum(M) \in T\quad\OBVIOUS\\ \quad\ps{4}{3.}\ \QED\ \BY\ \ONLY\ \ps{4}{1},\ \ps{4}{2} \end{tlablock} \end{itemize} \end{frame} \begin{frame} \frametitle{Proof Steps: \PICK} \qquad\begin{tlablock} \ps{3}{3.}\ \PICK\ x \in S,\ y \in T : P(x,y)\\ \quad\BY\ m+n \in S, 0 \in T \end{tlablock} \begin{itemize} \oo \tc{dkblue}{Make use of existentially quantified formulas} \begin{itemize} \o proof of \PICK\ step shows existence of suitable values \o declarations of constants asserted by \PICK\ are added to the context \o body of \PICK\ is added to known facts (usable if step unnamed) \o the goal is unchanged \end{itemize} \oo \tc{dkblue}{The above \PICK\ step is shorthand for} \medskip \begin{tlablock} \ps{3}{3a.}\ \E x \in S,\ y \in T : P(x,y)\\ \quad\BY\ m+n \in S, 0 \in T\\ \ps{3}{3.}\ \ \SUFFICES \begin{array}[t]{l@{\ \ }l} \ASSUME & \NEW\ x \in S,\ \NEW\ y \in T,\\ & P(x,y)\\ \PROVE & G \end{array} \end{tlablock} \end{itemize} \end{frame} \begin{frame} \frametitle{Pseudo Proof Steps: \DEFINE,\ \USE\ and \HIDE} \qquad\begin{tlablock} \ps{3}{.}\ \USE\ \ps{2}{1},\ n>0\ \DEFS\ Invariant,\ Next\\[2mm] \ps{3}{.}\ \DEFINE\ Aux(x)\ \deq\ \ldots\\[2mm] \ps{3}{.}\ \HIDE\ \DEF\ Aux \end{tlablock} \begin{itemize} \oo \tc{dkblue}{Manage set of usable facts} \begin{itemize} \o \USE\ : make known facts usable, avoiding explicit citation \o \DEFINE\ : introduce local definitions in proofs \o \HIDE\ : remove assertions and definitions from set of usable facts \end{itemize} \oo \tc{dkblue}{\USE\ and \HIDE\ : use sparingly} \begin{itemize} \o more concise proof scripts, but at the expense of clarity \o usually prefer explicit citation of facts and definitions \end{itemize} \oo \tc{dkblue}{\DEFINE\ : frequently useful for good proof structure} \begin{itemize} \o abbreviate recurring expressions \o mirror \kw{let} definitions in specifications \o \alert{NB :} local definitions are usable by default\ $\leadsto$\ use \HIDE \end{itemize} \end{itemize} \end{frame} \begin{frame} \frametitle{Architecture of \tlaps} \centerline{\includegraphics[width=1.0\linewidth]{architecture}} \end{frame} \begin{frame} \frametitle{Proof Manager} \begin{itemize} \item \tc{dkblue}{Interpret \tlaplus\ proof language} \begin{itemize} \o interpret module structure (imports and instantiations) \o manage context: known and usable facts and definitions \o expand operator definitions if they are usable \end{itemize} \oo \tc{dkblue}{Rewrite proof obligations to constant level} \begin{itemize} \o handle primed expressions such as\ \ \tc{dkgreen}{$Inv'$} \o distribute prime over (constant-level) operators \o introduce distinct 
symbols \tc{dkgreen}{$e$} and \tc{dkgreen}{$e'$} for atomic state expression \tc{dkgreen}{$e$} \end{itemize} \oo \tc{dkblue}{Invoke backend provers} \begin{itemize} \o user may explicitly indicate which proof method to apply \o optionally: certify backend proof \end{itemize} \end{itemize} \end{frame} %%% Local Variables: %%% mode: latex %%% TeX-master: "tutorial" %%% End:
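% --- Added example (sketch) ------------------------------------------------
% A short frame pulling the proof constructs together. This is an
% illustrative sketch: the theorem and the backend cited in the leaf proof
% are chosen for exposition and are not taken from the tutorial's running
% example.
\begin{frame}
  \frametitle{Putting It Together: A Small Structured Proof}

\begin{itemize}

\item \tc{dkblue}{A minimal proof sketch combining \TAKE, a named step, and \QED}

\medskip

\begin{tlablock}
  \THEOREM\ Closure\ \deq\ \A m,n \in Nat : m+n \in Nat\\
  \ps{1}{1.}\ \TAKE\ m,n \in Nat\\
  \ps{1}{2.}\ m+n \in Nat\\
  \quad\BY\ SimpleArithmetic\\
  \ps{1}{q.}\ \QED\ \BY\ \ps{1}{2}
\end{tlablock}

\oo \tc{dkblue}{Reading the proof}

\begin{itemize}
\o step $\ps{1}{1}$ reduces the goal to\ \ \tlabox{m+n \in Nat}\ \ for arbitrary $m$, $n$
\o step $\ps{1}{2}$ discharges the new goal via an arithmetic backend
\o the \QED\ step cites $\ps{1}{2}$
\end{itemize}

\end{itemize}
\end{frame}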
{ "alphanum_fraction": 0.6160306737, "avg_line_length": 28.8769771529, "ext": "tex", "hexsha": "ab238c191f116bef9799f0c11ac6fee51f5cec21", "lang": "TeX", "max_forks_count": 12, "max_forks_repo_forks_event_max_datetime": "2021-11-12T22:18:25.000Z", "max_forks_repo_forks_event_min_datetime": "2020-02-26T19:58:37.000Z", "max_forks_repo_head_hexsha": "13a1993263642092a521ac046c11e3cb5fbcbc8b", "max_forks_repo_licenses": [ "BSD-2-Clause" ], "max_forks_repo_name": "damiendoligez/tlapm", "max_forks_repo_path": "doc/presentations/2010-ifm/language.tex", "max_issues_count": 49, "max_issues_repo_head_hexsha": "13a1993263642092a521ac046c11e3cb5fbcbc8b", "max_issues_repo_issues_event_max_datetime": "2022-02-07T17:43:24.000Z", "max_issues_repo_issues_event_min_datetime": "2020-03-04T18:13:13.000Z", "max_issues_repo_licenses": [ "BSD-2-Clause" ], "max_issues_repo_name": "damiendoligez/tlapm", "max_issues_repo_path": "doc/presentations/2010-ifm/language.tex", "max_line_length": 123, "max_stars_count": 31, "max_stars_repo_head_hexsha": "13a1993263642092a521ac046c11e3cb5fbcbc8b", "max_stars_repo_licenses": [ "BSD-2-Clause" ], "max_stars_repo_name": "damiendoligez/tlapm", "max_stars_repo_path": "doc/presentations/2010-ifm/language.tex", "max_stars_repo_stars_event_max_datetime": "2022-02-19T18:38:07.000Z", "max_stars_repo_stars_event_min_datetime": "2016-08-16T14:58:40.000Z", "num_tokens": 5755, "size": 16431 }
% % Appendix A % \chapter{SVFit Mass} \label{SVFit} A study has been performed to improve the next iteration of the search for LFV decays of the Higgs boson. In this study, we have observed that using the ``Classic'' SVFit (Sensitivity Volume Fit) algorithm can help improve the signal resolution compared to the collinear mass resolution~\cite{Bianchini:2016yrt}. The ``Classic'' SVFit algorithm has been developed to reconstruct the mass of a Higgs boson decaying into two tau leptons \mtt and is an improved version of the SVFit algorithm. The SVfit algorithm~\cite{Bianchini:2014vza} has been used to reconstruct the Higgs boson mass in the SM \Htt analysis and searches for further Higgs bosons predicted by models beyond the SM performed by the CMS collaboration during LHC Run 1. Compared to alternative mass variables, the SVfit algorithm's usage has improved the SM \Htt analysis's sensitivity for measuring the signal rate by $\approx 40\%$~\cite{Chatrchyan:2014nva}. The improvement in sensitivity corresponds to a gain by about a factor of two in the integrated luminosity of the analyzed dataset. The ``Classic'' SVFit algorithm uses a likelihood function of arbitrary normalization. The algorithm allows for the reconstruction of not only the mass \mtt of the tau lepton pair but any kinematic function of the two tau leptons, including the \pt, $\eta$, and $\phi$ and transverse mass of the tau lepton pair. A further improvement concerns the algorithm's extension to account for the experimental resolution on the reconstruction of hadrons produced in the tau decays. The ``Classic'' SVFit algorithm was modified to reconstruct the mass of the Higgs boson decaying into LFV decay modes \mlt. A brief description of the algorithm is given in the following paragraphs. \section{``Classic'' SVfit algorithm} As only one of the lepton from an LFV Higgs decay is a tau, it was modified so that the matrix element of the probability density function defined can reconstruct the Higgs mass \mlt. The phase space of the tau decay products along with the angles $\theta_{inv}$ and $\phi_{inv}$ are illustrated in Figure~\ref{fig:sv}. The $\bp_{inv}$ vector is located on the surface of a cone, the axis of which is given by the $\bp_{vis}$ vector. The variable $\phi_{inv}$ represents the angle of rotation, in a counter-clockwise direction, around the cone's axis. The value $\phi_{inv} = 0$ is chosen to correspond to the case that the $\bp_{inv}$ vector is within the plane spanned by the $\bp_{vis}$ vector and the beam direction. \begin{equation} \begin{aligned} \mathcal{L}\left(\bp^{\text{vis}} ; p_{\text{x}}^{\text{rec}}, p_{\text{y}}^{\text{rec}} \mid \mh \right)=\frac{32 \pi^{4}}{s} \int \text{d} \mh \text{d} \Phi_{n} \left|\text{BW}_{\Pgt}\right|^{2} \left|\mathcal{M}_{\Pgt \rightarrow \ldots}(\tilde{\bp})\right|^{2} \\ W\left(\bp^{\text{vis}} \mid \hat{\bp}^{\text{vis}}\right) W_{\text{rec}}\left(p_{\text{x}}^{\text{rec}}, p_{\text{y}}^{\text{rec}} \mid \hat{p}_{\text{x}}^{\text{rec}}, \hat{p}_{\text{y}}^{\text{rec}} \right) \mathcal{F}(\bp) \end{aligned} \end{equation} \begin{figure*}[!htpb] \centering \includegraphics[width=0.9\textwidth]{plots/appendix/SV.png} \caption{Illustration of the variables $\theta_{inv}$ and $\phi_{inv}$ that specify the orientation of the $\bp_{inv}$ vector relative to the momentum vector $\bp_{vis}$ of the visible tau decay products.} \label{fig:sv} \end{figure*} The function $\mathcal{F}(\bp)$ in the integrand may be an arbitrary function of the momenta of the prompt and tau leptons. 
The integral is evaluated numerically, using a custom implementation of the Markov chain MC integration method with the Metropolis-Hastings algorithm~\cite{Hastings:1970aa}. The actual value $\mathcal{L}(y)$ of the integral is irrelevant. The reconstruction of the mass \mlt of the prompt and tau lepton pair is based on choosing:
%
$\mathcal{F}(\bp) \equiv \left(\hat{E}_{\ell} + \hat{E}_{\Pgt}\right)^{2} - \left(\left(\hat{p}_{\text{x}}^{\ell}+\hat{p}_{\text{x}}^{\Pgt}\right)^{2}+\left(\hat{p}_{\text{y}}^{\ell}+\hat{p}_{\text{y}}^{\Pgt}\right)^{2}+\left(\hat{p}_{\text{z}}^{\ell}+\hat{p}_{\text{z}}^{\Pgt}\right)^{2}\right)$,
%
recording the values of $\mathcal{F}(\bp)$ for each evaluation of the integrand by the Markov chain and taking the median of the series of $\mathcal{F}(\bp)$ values as the best estimate \mlt for the mass of the prompt and tau lepton pair in a given event. The total number of evaluations of the integrand, referred to as Markov chain ``states'', amounts to 100000 per event. The first 10000 evaluations of the integrand are used as a ``burn-in'' period and are excluded from the computation of the median.

Figure~\ref{fig:svfit} shows the collinear mass and SVFit mass distributions. The distributions are fit with a double Gaussian, and the goodness of fit, as measured by $\chi^2/\text{ndof}$, is close to one, indicating a good fit. The full width at half maximum is used to infer the signal resolution; it has a value of 38.88~\GeV for the collinear mass, while for the SVFit mass the value is 30.90~\GeV. This corresponds to a 20\% improvement in signal resolution. Both the collinear mass and the SVFit mass peak close to the Higgs boson mass of 125~\GeV. The collinear mass has a higher mean of $\sim 129 \GeV$ due to its larger tail, while the SVFit mass has a mean of $\sim 117 \GeV$ due to its smaller tail.

\begin{figure*}[!htpb]
  \centering
  \includegraphics[width=0.45\textwidth]{plots/appendix/CollMass.png}
  \includegraphics[width=0.45\textwidth]{plots/appendix/SVFit.png}
  \caption{Collinear mass (left) vs. SVFit mass (right).}
  \label{fig:svfit}
\end{figure*}

This study has shown that the mass resolution is significantly improved by using the SVFit mass instead of the collinear mass, which can give much more sensitive results for the \Hmt and \Het searches. A future search using the full Run 3 data can benefit from this improved sensitivity, and the SVFit algorithm can easily be extended to heavy Higgs boson searches. The only disadvantage noted during the study is computational: the SVFit mass takes an order of magnitude longer to compute than the collinear mass, which has to be investigated and improved in any future analysis.
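For illustration, the median-of-states mass extraction described earlier in this appendix can be summarized by the following schematic pseudo-implementation. This is only a sketch, not the actual ``Classic'' SVFit code: the proposal step size, the likelihood, and the function computing $\mathcal{F}(\bp)$ are placeholders that would be supplied by the full algorithm.

\begin{verbatim}
import numpy as np

def estimate_mass(log_likelihood, mass_of, theta0,
                  n_states=100000, n_burnin=10000, step=0.1, seed=0):
    """Median-of-Markov-chain mass estimator (illustrative sketch only).

    log_likelihood: returns log L for a point theta in the integration
                    variables (e.g. phi_inv, theta_inv of each decay).
    mass_of:        returns the invariant mass F(p) for that point.
    """
    rng = np.random.default_rng(seed)
    theta = np.asarray(theta0, dtype=float)
    logl = log_likelihood(theta)
    masses = np.empty(n_states)
    for i in range(n_states):
        proposal = theta + step * rng.standard_normal(theta.shape)
        logl_prop = log_likelihood(proposal)
        # Metropolis-Hastings accept/reject with a symmetric proposal
        if np.log(rng.uniform()) < logl_prop - logl:
            theta, logl = proposal, logl_prop
        masses[i] = mass_of(theta)
    # Drop the burn-in period and take the median as the mass estimate
    return np.median(masses[n_burnin:])
\end{verbatim}

Taking the median rather than the mean of the recorded $\mathcal{F}(\bp)$ values makes the estimate robust against the long tails explored by the Markov chain.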
{ "alphanum_fraction": 0.7490090376, "avg_line_length": 137.1086956522, "ext": "tex", "hexsha": "67cd25dd95e35c7d47962687e9bfc1610d1ed0d3", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "9a7a4ae447331fb76b458374b9a3511298df309d", "max_forks_repo_licenses": [ "LPPL-1.3c" ], "max_forks_repo_name": "psiddire/nddiss", "max_forks_repo_path": "thesis/appendixA.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "9a7a4ae447331fb76b458374b9a3511298df309d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "LPPL-1.3c" ], "max_issues_repo_name": "psiddire/nddiss", "max_issues_repo_path": "thesis/appendixA.tex", "max_line_length": 1057, "max_stars_count": null, "max_stars_repo_head_hexsha": "9a7a4ae447331fb76b458374b9a3511298df309d", "max_stars_repo_licenses": [ "LPPL-1.3c" ], "max_stars_repo_name": "psiddire/nddiss", "max_stars_repo_path": "thesis/appendixA.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1743, "size": 6307 }
\documentclass[conference]{IEEEtran} \IEEEoverridecommandlockouts \usepackage{cite} \usepackage{amsmath,amssymb,amsfonts} \usepackage{algorithmic} \usepackage{graphicx} \usepackage{textcomp} \usepackage{xcolor} \def\BibTeX{{\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08em T\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}} \begin{document} \title{ Cosmic\\A Software Simulated 8-Bit Computer} \author{\IEEEauthorblockN{Clay Buxton} \IEEEauthorblockA{\textit{Computer Engineering, Computer Science} \\ \textit{Elizabethtown College}\\ Elizabethtown, PA \\ [email protected]} \and \IEEEauthorblockN{Kevin Carman} \IEEEauthorblockA{\textit{Computer Engineering, Computer Science} \\ \textit{Elizabethtown College}\\ Elizabethtown, PA \\ [email protected]} } \maketitle \begin{abstract} Cosmic is a simulated 8-bit microcomputer architecture , designed to resemble retro computers. Cosmic's goal is to simulate, completely in software but with hardware design in mind, a microprocessor from the early 1980s. \end{abstract} \begin{IEEEkeywords} retro-computing, simulation, emulation, microprocessor, architecture. \end{IEEEkeywords} \section{Introduction} Cosmic can be broken down into four main parts: \begin{itemize} \item \textbf{Cosmic Processor} - The simulated CPU used to drive the rest of the system. The Cosmic CPU design is comparable to the Zilog Z80 and the MOS 6502. \item \textbf{Cosmic System} - Everything that is found on the main board of a similar physical machine such as memory, input \& output, graphics, and audio. This also will fit in with appropriate machines of the time like the Apple II. Our primary goals are things like RAM, ROM, keyboard, and graphics while our stretch goals are I/O, audio, and expansion peripherals. \item \textbf{Cosmic Software} - Software that will run on the cosmic system. Base goals are a simple kernel, with appropriate drivers, some form of command interpreter, basic utilities, and a simple game or two. Our stretch goals are a BASIC interpreter, a library that reflects coreutils, and a port of a popular game of the time. \item \textbf{Auxiliary Software} - Software written to run on a modern computer. This will consist of the simulation environment along with an assembler and user-friendly panel. \end{itemize} The project will be deemed "acceptable" once the processor and auxiliary software are complete, and the base goals of the system and software portions have been met. Once finished, the simulated system will operate similarly to a computer of the early 1980s, primarily the Apple II. The design philosophy of the computer is to create a hardware-like machine. The software will be engineered in a way that will reflect the hardware it is based around, without having to deal with the nuances of a physical machine. Features like chip pin-out, buses, and a physical board will be taken into account while things like timing will be disregarded. Neither team member is taking the direct lead on any specific part of the project at the moment, though we will work on things independently as the project progresses and there are more tasks available. As a group of two with a project that has many facets, there should be very little overlap. \section{Background} The largest source of background information is in the machines that Cosmic is designed to resemble. As previously mentioned, the two primary reference devices are the Apple II and the TRS-80 containing the MOS 6502 and the Zilog Z80, respectively. 
There are a few other projects out there that are similar to what we are doing. Emulators of similar devices will be helpful for design inspiration. Unlike emulators, we will not have the same design constraints, which adds a sense of freedom when creating the system. Outside of emulators, there are a few projects where developers have created self-defined simulated platforms. One such project is an 8-bit Assembler and Simulator written by Marco ``Schweigi'' Schweighauser~\cite{b1}. This project is similar to our first step of creating a processor and an assembler for the Cosmic Processor. Another project that influenced the creation of Cosmic is David Murray's Commander X16 project~\cite{b2}. Unlike the Assembler-Simulator, this is a ``home-brew'' physical computer made using off-the-shelf parts. This project uses the 6502 and other components to create a retro-feeling machine with more modern parts.

Luckily, devices of the time have been very well documented in the past 40 years. This documentation makes reverse-engineering the systems and processors simple and allows us to figure out how the engineers of yesterday solved problems that we may encounter while designing our system.

\begin{thebibliography}{00}
\bibitem{b1} Schweighauser, M. (2019). Schweigi/assembler-simulator. [online] GitHub. Available at: https://github.com/Schweigi/assembler-simulator [Accessed 7 Sep. 2019].
\bibitem{b2} GitHub. (2019). Commander X16. [online] Available at: https://github.com/commanderx16 [Accessed 13 Sep. 2019].
\end{thebibliography}

\end{document}
{ "alphanum_fraction": 0.7913965822, "avg_line_length": 71.7042253521, "ext": "tex", "hexsha": "820330ee7405830773853f29b68829511580f1f3", "lang": "TeX", "max_forks_count": 5, "max_forks_repo_forks_event_max_datetime": "2020-06-03T07:07:40.000Z", "max_forks_repo_forks_event_min_datetime": "2019-12-19T02:23:23.000Z", "max_forks_repo_head_hexsha": "307e2bda397effcd5bd430319d20ec72186342f4", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Abhisheknishant/Cosmic", "max_forks_repo_path": "doc/Inital Report.tex", "max_issues_count": 54, "max_issues_repo_head_hexsha": "307e2bda397effcd5bd430319d20ec72186342f4", "max_issues_repo_issues_event_max_datetime": "2021-11-19T18:36:52.000Z", "max_issues_repo_issues_event_min_datetime": "2019-11-09T02:17:12.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Abhisheknishant/Cosmic", "max_issues_repo_path": "doc/Inital Report.tex", "max_line_length": 912, "max_stars_count": 108, "max_stars_repo_head_hexsha": "307e2bda397effcd5bd430319d20ec72186342f4", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Abhisheknishant/Cosmic", "max_stars_repo_path": "doc/Inital Report.tex", "max_stars_repo_stars_event_max_datetime": "2021-11-22T20:08:39.000Z", "max_stars_repo_stars_event_min_datetime": "2019-09-28T06:32:00.000Z", "num_tokens": 1226, "size": 5091 }
\documentclass[11pt]{article} \usepackage{amsmath,amssymb,amsthm, logicproof} \usepackage[margin=2.75cm]{geometry} \usepackage{multicol} \newcommand{\encode}[1]{\langle #1 \rangle} \title{\bf Predicate Logic\\Quantifier Rules\\[2ex] \rm\normalsize CS251 at CCUT, Spring 2017 \\} \date{May 8$^{th}$, 2017} \author{David Lu} \begin{document} \maketitle \paragraph{Contents} \begin{enumerate} \item Universal Instantiation (UI) \item Existential Generalization (EG) \item Universal Generalization (UG) \item Existential Instantiation (EI) \item Quantifier Negation (QN) and Quantifier Equivalence (QE) \item Multiple Quantification \end{enumerate} \paragraph{1. Universal Instantiation/Elimination UI} If X is a universally quantified sentence, then you are licensed to conclude any of its substitution instances below it. Let $s$ be any constant, $P$ be a predicate, and $u$ be any variable. The natural deduction rule UI can be expressed as follows: \begin{logicproof}{1} \forall uP(...u...)\\ ...\\ P(...s...) & UI \end{logicproof} This rule, in short, allows us to eliminate the universal quantifier of a universally quantified sentence and substitute any constant we'd like for instances of the variable that it bound. The parenthetical notation in the argument place of the predicate denotes that the expression may be complex and not a simple subject predicate sentence.\\ Here's an example: Everyone loves Eve. Therefore Adam loves Eve. \begin{logicproof}{1} \forall x Lxe & Premise \\ Lae & 1, UI \end{logicproof} In forming the substitution instance of a universally quantified sentence, you must be careful always to put the same name everywhere for the substituted variable. Substituting $a$ for $x$ in $\forall xLxx$, we get $Laa$, not $Lxa$.\\ Here's another example: All humans are mortal. Socrates is a human. Thus, Socrates is mortal. \begin{logicproof}{1} \forall x(Hx \rightarrow Mx) & Premise \\ Hs & Premise \\ Hs \rightarrow Ms & 1, UI \\ Ms & 2, 3 MP \end{logicproof} Notice that the universal quantifier in the first premise binds two instances of $x$ in the sentence. So when we use UI at line 3, both must be replaced by our chosen constant. \paragraph{2. Existential Generalization/Introduction EG} Intuitively, from a closed sentence with a constant, we are licensed to infer the existential generalization of that sentence, where $\exists xPx$ is an existential generalization of $Pa$. The natural deduction rule EG can be expressed as follows: \begin{logicproof}{1} P(...s...) \\ ...\\ \exists x P(...x...) & EG \end{logicproof} From a non-quantified sentence, which contains the constant $s$, we are allowed to take out one or more of the occurrences of $s$ and substitute an existentially bound variable. Example: Rover loves to wag his tail. Therefore, something loves to wag its tail. \begin{logicproof}{1} Wr & Premise\\ \exists x Wx & 1, EG \end{logicproof} Here's another example: Everyone is happy. Therefore, someone is happy. \begin{logicproof}{1} \forall xHx & Premise\\ Ha & 1, UI\\ \exists x Hx & 2, EG \end{logicproof} \newpage \paragraph{3. Universal Generalization/Introduction UG} The intuitive idea for universal introduction is that if a constant, as it occurs in a sentence, is completely arbitrary, you can universally generalize on that constant. This means that you can rewrite the sentence with a variable written in for all occurrences of the arbitrary constant, all bound by a universal quantifier. 
If I can show that an arbitrary element of set A is also an element of set B, then I am licensed to infer that every element of A is an element of B.\\ There are a number of ways to state the UG rule such that the restriction that our constant is arbitrary is satisfied. Here's one way: \begin{logicproof}{1} P(...s...) & ($s$ must name an arbitrary individual)\\ ...\\ \forall x P(...x...) & 1, UG \end{logicproof} To say that $s$ names an arbitrary individual puts a restriction on what constants we are allowed to universally generalize upon. In particular, $s$ may not appear in the premises and $s$ may not come from the result of a use of EI. Further, every instance of $s$ in the sentence must be replaced by a variable when we use the rule UG.\\ Here's an example of the mistake above: Everyone loves themself. Therefore, everyone loves Alice. \begin{logicproof}{1} \forall x Lxx & Premise\\ Laa & 1, UI\\ \forall x Lxa & 2, UG (Mistake!) \end{logicproof} Here is an example of a mistake in not generalizing upon an arbitrary individual: Doug is good at logic. Therefore, everyone is good at logic. \begin{logicproof}{1} Gd & Premise\\ \forall x Gx & 1, UG (Mistake!) \end{logicproof} Here's a somewhat longer example: All birds have feathers. Only birds fly. Therefore, only feathered things fly. \begin{logicproof}{2} \forall x (Bx \rightarrow Fx) & Premise \\ \forall x(\neg Bx \rightarrow \neg Lx) & Premise \\ Ba \rightarrow Fa & 1, UI \\ \neg Ba \rightarrow \neg La & 2, UI \\ La \rightarrow Ba & 4, Contra \\ La \rightarrow Fa & 3, 5 HS \\ \neg Fa \rightarrow \neg La & 6, Contra \\ \forall x(\neg Fx \rightarrow \neg Lx) & 7, UG \end{logicproof} Notice that the constant $a$ in the proof above does not appear in the premises or as the result of an existential instantiation. So $a$ names an arbitrary individual, satisfying the restriction on on our use of UG at line 8. \newpage \paragraph{4. Existential Instantiation/Elimination EI} The following argument is intuitively valid: All lions are cats. Some lions roar. Therefore, some cats roar. \begin{logicproof}{1} \forall x(Lx \rightarrow Cx) & Premise \\ \exists x (Lx \land Rx) & Premise \\ \exists x (Cx \land Rx) & Conclusion \end{logicproof} We have no rule yet for exploiting the existential premise. Our reasoning ought to go something like this: Suppose Simba is a lion that roars. Since all lions are cats, Simba must be a cat that roars. So there exists a cat that roars.\\ There are a couple of ways to implement the EI rule. In my informal reasoning above, I asked the reader to suppose that some individual named Simba was a lion that roars. Importantly, Simba may, or may not, exist. So any conclusions we draw from our reasoning, cannot include conclusions about Simba. So we might implement our EI rule as a sub-derivation rule, much like \textit{conditional proof} and \textit{indirect proof}. (The boxes in the proofs below surround a subproof, much like I do with a vertical bar when I hand write proofs.) \begin{logicproof}{2} \exists x P(...x...) \\ \begin{subproof} P(...s...) & Assumption for EI \\ ... 
&\\
p & $p$ is any sentence that does not mention $s$
\end{subproof}
p & 1, 2-4 EI
\end{logicproof}

Here's our initial cat argument example:

\begin{logicproof}{2}
\forall x(Lx \rightarrow Cx) & Premise \\
\exists x (Lx \land Rx) & Premise \\
\begin{subproof}
Ls \land Rs & Assumption for EI\\
Ls & 3, Simp\\
Rs & 3, Simp\\
Ls \rightarrow Cs & 1, UI\\
Cs & 4, 6 MP\\
Cs \land Rs & 5, 7 Conj\\
\exists x (Cx \land Rx) & 8, EG (Notice $s$ does not appear here)
\end{subproof}
\exists x (Cx \land Rx) & 2, 3-9 EI
\end{logicproof}

An alternate way to schematize our EI rule is to place a restriction on what constant we're allowed to substitute for variables bound by the existential quantifier we're removing. In particular, we must pick a new constant, one that does not appear earlier in our proof.

\begin{logicproof}{1}
\exists x P(...x...)\\
...\\
P(...c...) & 1, EI ($c$ must be a new constant, not appearing earlier in the proof)
\end{logicproof}

The result of either version of the rule is the same sort of restriction on how we may use the EI rule.

\newpage

\paragraph{5. Quantifier Negation QN and Quantifier Equivalence QE} In addition to the rules allowing us to introduce or eliminate the two quantifiers, we have some rules allowing us to translate from one quantifier to the other and vice versa, as well as some natural equivalences between quantified statements. Here are some QN rules. Let $W$ be some well-formed formula. I left out the variables for readability.
\begin{enumerate}
\item $\forall W \equiv \neg \exists \neg W$
\item $\exists W \equiv \neg \forall \neg W$
\item $\neg \forall W \equiv \exists \neg W$
\item $\neg \exists W \equiv \forall \neg W$
\end{enumerate}

Here are some QEs, or quantifier equivalences.
\begin{enumerate}
\item $\forall x \forall y W \equiv \forall y \forall x W$
\item $\exists x \exists y W \equiv \exists y \exists x W$
\item $\forall x (Px \land Qx) \equiv \forall x Px \land \forall y Qy$
\item $\exists x (Px \lor Qx) \equiv \exists x Px \lor \exists y Qy$
\item $\forall x Wx \equiv \forall y W(x/y)$, where $(x/y)$ means replace each instance of $x$ with $y$ (provided $y$ does not already occur in $W$)
\item $\exists x Wx \equiv \exists y W(x/y)$, where $(x/y)$ means replace each instance of $x$ with $y$ (provided $y$ does not already occur in $W$)
\end{enumerate}

\paragraph{6. Multiple Quantification} To represent the sentence \textit{Someone gave a bracelet to Alice}, we need to associate quantifier phrases with two of the noun phrase positions in the predicative context: $x$ gave $y$ to $z$ ($Gxyz$). There's no special problem about this; we simply prefix both quantifiers, using the variables to link each quantifier with the appropriate noun phrase: $\exists x \exists y (Gxya \land Px \land By)$, where $Px$ means ``$x$ is a person'' and $By$ means ``$y$ is a bracelet.''\\
Another example: \textit{Any elephant is larger than every person}: $\forall x \forall y ((Ex \land Py) \rightarrow Lxy)$

\end{document}
{ "alphanum_fraction": 0.7140079812, "avg_line_length": 48.865, "ext": "tex", "hexsha": "67940a15033e3d2b3847b8f0b9b5c7575180a93d", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2019-02-21T21:22:55.000Z", "max_forks_repo_forks_event_min_datetime": "2019-02-21T21:22:55.000Z", "max_forks_repo_head_hexsha": "755cdeaa36f4eac817d09efe29550843fa5a4fdc", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "DavidJLu/CCUT", "max_forks_repo_path": "docs/CS251/QuantifierRules.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "755cdeaa36f4eac817d09efe29550843fa5a4fdc", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "DavidJLu/CCUT", "max_issues_repo_path": "docs/CS251/QuantifierRules.tex", "max_line_length": 541, "max_stars_count": 5, "max_stars_repo_head_hexsha": "755cdeaa36f4eac817d09efe29550843fa5a4fdc", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "DavidJLu/CCUT", "max_stars_repo_path": "docs/CS251/QuantifierRules.tex", "max_stars_repo_stars_event_max_datetime": "2021-02-25T21:59:58.000Z", "max_stars_repo_stars_event_min_datetime": "2018-06-04T16:11:56.000Z", "num_tokens": 2707, "size": 9773 }
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % American Geophysical Union (AGU) % LaTeX Template % Version 1.0 (3/6/13) % % This template has been downloaded from: % http://www.LaTeXTemplates.com % % Original author: % The AGUTeX class and agu-ps referencing style were created and are owned % by AGU: http://publications.agu.org/author-resource-center/author-guide/latex-formatting-toolkit/ % % This template has been modified from the blank AGU template to include % examples of how to insert content and drastically change commenting. The % structural integrity is maintained as in the original blank template. % % Important notes: % This template retains extensive commenting from the AGU template. It is heavily % advised you read these comments and follow them in order to insure a speedy % submission process. % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % AGUtmpl.tex: this template file is for articles formatted with LaTeX2e, % Modified March 2013 % % This template includes commands and instructions % given in the order necessary to produce a final output that will % satisfy AGU requirements. % % PLEASE DO NOT USE YOUR OWN MACROS % DO NOT USE \newcommand, \renewcommand, or \def. % % FOR FIGURES, DO NOT USE \psfrag or \subfigure. % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % All questions should be e-mailed to [email protected]. % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Step 1: Set the \documentclass % There are two options for article format: two column (default) and draft. % PLEASE USE THE DRAFT OPTION TO SUBMIT YOUR PAPERS. % The draft option produces double spaced output. % Choose the journal abbreviation for the journal you are submitting to: % jgrga JOURNAL OF GEOPHYSICAL RESEARCH % gbc GLOBAL BIOCHEMICAL CYCLES % grl GEOPHYSICAL RESEARCH LETTERS % pal PALEOCEANOGRAPHY % ras RADIO SCIENCE % rog REVIEWS OF GEOPHYSICS % tec TECTONICS % wrr WATER RESOURCES RESEARCH % gc GEOCHEMISTRY, GEOPHYSICS, GEOSYSTEMS % sw SPACE WEATHER % ms JAMES % % % % (If you are submitting to a journal other than jgrga, % substitute the initials of the journal for "jgrga" below.) \documentclass[two column,grl]{AGUTeX} % To create numbered lines: % If you don't already have lineno.sty, you can download it from http://www.ctan.org/tex-archive/macros/latex/contrib/ednotes/ (or search the internet for lineno.sty ctan), available at TeX Archive Network (CTAN). Take care that you always use the latest version. % To activate the commands, uncomment \usepackage{lineno} and \linenumbers*[1]command, below: %\usepackage{lineno} %\linenumbers*[1] %\usepackage{natbib} % To add line numbers to lines with equations: % \begin{linenomath*} % \begin{equation} % \end{equation} % \end{linenomath*} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Figures and Tables % DO NOT USE \psfrag or \subfigure commands. % Figures and tables should be placed AT THE END OF THE ARTICLE, after the references. 
% Uncomment the following command to include .eps files (comment out this line for draft format): \usepackage{graphicx} \usepackage{epstopdf} \usepackage{epsfig} % Substitute one of the following for [dvips] above if you are using a different driver program and want to proof your illustrations on your machine: % [xdvi], [dvipdf], [dvipsone], [dviwindo], [emtex], [dviwin], % [pctexps], [pctexwin], [pctexhp], [pctex32], [truetex], [tcidvi], % [oztex], [textures] % Uncomment the following command to allow illustrations to print when using Draft: \setkeys{Gin}{draft=false} % See how to enter figures and tables at the end of the article, after references. %---------------------------------------------------------------------------------------- % RUNNING HEAD AND CORRESPONDING AUTHOR %---------------------------------------------------------------------------------------- % Author names in capital letters: \authorrunninghead{NGUYEN ET AL.} %------------------------------------------------ % Shorter version of title entered in capital letters: \titlerunninghead{METHANE EMISSIONS ESTIMATES} %------------------------------------------------ % Corresponding author mailing address and e-mail address: \authoraddr{: Newton Nguyen, Department of Environmental Science and Engineering, California Institute of Technology, Pasadena, California, USA. ([email protected])} %---------------------------------------------------------------------------------------- \begin{document} %---------------------------------------------------------------------------------------- % TITLE %---------------------------------------------------------------------------------------- \title{Effects of Interactive Hydroxyl Chemistry on Decadal Methane Emissions Estimates} %---------------------------------------------------------------------------------------- % AUTHORS AND AFFILIATIONS %---------------------------------------------------------------------------------------- % Use \author{\altaffilmark{}} and \altaffiltext{} % \altaffilmark will produce footnote; matching \altaffiltext will appear at bottom of page. \authors{Newton Nguyen,\altaffilmark{1}} %James Smith,\altaffilmark{1,2}, and %Jane Smith\altaffilmark{2}} \altaffiltext{1}{Department of Environmental Science and Engineering, California Institute of Technology, Pasadena, California, USA.} %---------------------------------------------------------------------------------------- % ABSTRACT %---------------------------------------------------------------------------------------- % Do NOT include any \begin...\end commands within the body of the abstract. \begin{abstract} The rise of atmospheric methane experienced a brief pause between 2000 and 2007 with a subsequent resumption in growth continuing to the present day. Questions on both the commencement and termination of this stabilization remain open due to uncertainty in methane sources and its main sink, reaction with the hydroxyl radical (OH). In the atmosphere, methane undergoes oxidation reactions and thus, its lifetime depends on the concentration of hydroxyl radicals. Methyl Chloroform has been used to constrain global hydroxyl concentrations and infer methane lifetimes. Here, we report on results using a two-hemispheres box model that includes dynamic chemistry of methane, carbon monoxide, and the hydroxyl radical. 
We will discuss the impact of critical assumptions on methane flux inversions: 1) Constant OH concentrations and 2) Constant OH sources with interactive chemistry assuming constant or variable CO, 3) variable OH sources, 4) variations in OH inter-hemispheric gradients. Hemispheric sources are calculated from a nonlinear, stochastic Bayesian inversion constrained by data from methane (NOAA), carbon monoxide (NOAA), and Methyl chloroform (NOAA, GAGE/AGAGE) observations. We perform sensitivity experiments by calculating the impact that including each aspect of the chemical system has on the emissions estimates. Based on our results, we find that when ignoring interactive OH chemistry, methane emissions have a negative bias at the beginning of the 21st century, during negative OH anomalies. However, when CO emissions are accounted for, the reduction in CO emissions beginning in the 1990’s results in increasing methane emissions but relatively higher OH concentrations; this is due to CO emissions decreasing during this period. We also find that when account for a variable OH source, our results agree with those found in \citep{turner_ambiguity_2017}. \end{abstract} %---------------------------------------------------------------------------------------- % ARTICLE CONTENT %---------------------------------------------------------------------------------------- % The body of the article must start with a \begin{article} command % \end{article} must follow the references section, before the figures and tables. \begin{article} \section{Introduction} The steady rise of methane concentrations in the Earth’s atmosphere was interrupted by a brief stabilization from 2000 to 2007 with a subsequent resumption in growth; the causes of the onset and renewed growth remain ambiguous. Overall, methane concentrations have tripled from 750 ppb in 1850 to 1850 ppb in the present day \citep{IPCC}. Methane is not only the second most important anthropogenic greenhouse gas but also a precursor to tropospheric ozone, itself a greenhouse gas and air pollutant \citep{fiore_impact_2006, shindell_simultaneously_2012, IPCC}. Its sources are spatially and temporally heterogeneous and its sinks can also be variable, resulting in major uncertainties in determining the cause of the stabilization. \begin{figure} \begin{center} \includegraphics[width= 0.45\textwidth]{./methane_timeseries.png} \end{center} \caption{Plotted is a timeseries of methane concentrations collected through ice core measurements from the last ~2,000 years. On the right, decadal scale concentrations are shown with focus on the Methane Stabilization.} \end{figure} Changes in methane sources have been proposed as possible explanations for the cause of the renewed growth. Major sources include microbial production in wetlands, permafrost melting in the high latitudes, biomass burning, rice cultivation, solid and liquid waste, and activities related to fossil fuel extraction. A shift in global $\delta^{13}C$ towards more depleted carbon indicates that the end of the stabilization could have been driven by a change in methane sources \citep{bousquet_contribution_2006, kai_reduced_2011}. Biogenic sources favor lighter C and a shift towards more depleted $\delta^{13}C$ can indicate either an increase in isotopically lighter, e.g. biogenic, sources or a decrease in heavier, e.g. pyrogenic and thermogenic, sources \citep{nisbet_rising_nodate, schaefer2016}. 
An analysis by \citep{nisbet_rising_nodate} concludes that a shift towards lighter isotopes demonstrate an increase in wetland emissions, while \citep{schaefer2016} conclude that the isotopic shift is due to increased emissions from agricultural production. Yet, the change in the isotopic composition can also have been caused by a decrease in biomass burning sources, which are isotopically heavy, alongside an increase in fossil fuel sources, which are generally lighter than biomass burning sources and heavier than biogenic sources; \citep{worden_reduced_2017} concludes this. These studies came to different conclusions with the same constraints indicating that the problem may be underdetermined. Other studies have focused on a change in the methane sink. Methane loss is primarily driven by oxidation via OH radicals but also transport to the stratosphere, consumption by methanotrophic microbes in soils, and oxidation by chlorine radicals. The Hydroxyl Radical accounts for about 90\% of methane loss and thus is the main mechanism of methane loss in the troposphere. \citep{turner_ambiguity_2017} and \citep{rigby_role_2017}, using Methyl Chloroform (MCF) as a proxy for Hydroxyl Radical concentrations, conclude that changes in hydroxyl concentrations occurred during the stabilization period. Yet, their conclusions differ. \citep{rigby_role_2017} conclude that a decline in Hydroxyl concentrations accompanied by a slow-down in the methane growth rate was implicated in the renewed growth, while \citep{turner_ambiguity_2017} find that the renewed growth was caused by a decline in Hydroxyl radicals with a decrease in methane emissions. Yet, a causal mechanism for a decrease in hydroxyl concentrations has not been found. Quantifying the effect of dynamical Hydroxyl concentrations on methane emissions estimates provides a useful constraint on the methane stability problem. The relationship between methane concentrations and emissions is nonlinear; this increases the complexity of the system and makes emissions estimates more difficult. Methane is a short lived greenhouse gas with a lifetime of approximately a decade. However, it’s lifetime primarily depends on the concentration of the Hydroxyl Radical, e.g. higher concentrations of OH results in a shorter methane lifetime. In addition, the oxidation of methane yields carbon monoxide which also consumes hydroxyl radicals \citep{prather_lifetimes_2007}. Previous studies use MCF to constrain methane lifetime \citep{rigby_role_2017, turner_ambiguity_2017}. We instead account for variable methane lifetime by including dynamic CH4, CO, and OH chemistry in our model. The objective of the present study is to quantify the systematic biases when including (excluding) variable methane lifetimes and thus interactive chemistry in the estimation of emissions. Here, we include interactive methane chemistry in our inversions by using a two hemispheres box model with CH4, CO, and OH chemistry. We will discuss the impact of critical assumptions on methane flux inversions: 1) Constant OH concentrations and 2) Constant OH sources with interactive chemistry assuming constant or variable CO, 3) variable OH sources, 4) variations in OH inter-hemispheric gradients. 
Section 2 provides the theoretical background for the variability of methane lifetimes, Section 3 describes the model and data employed in the current study, and in Section 4 we discuss the conclusions of the study and provide recommendations for future work in methane emissions estimates over the decadal timescale.

%------------------------------------------------

\section{Theoretical Background and Forward Model}

We use the model developed by \citep{prather_time_1996, prather1994lifetimes} in the development of our forward model. In the atmosphere, the hydroxyl radical is the primary mechanism of methane loss.
\begin{equation}
OH + CH_4 \Longrightarrow \mbox{(multiple steps)} \Longrightarrow CO + \mbox{products}
\end{equation}
CO is the largest sink of OH in the troposphere and thus, the concentration of OH depends on both the concentrations of CH4 and CO.
\begin{equation}
OH + CO \Longrightarrow CO_2 + H
\end{equation}
In addition, the hydroxyl radical also reacts with other compounds in the atmosphere such as ethane, propane, and other non-methane hydrocarbons, in addition to being recycled with the emission of NOx. Therefore, we create another OH loss reaction that abstracts this complexity with an arbitrary group of molecules X \citep{prather_time_1996, prather1994lifetimes}.
\begin{equation}
OH + X \Longrightarrow \mbox{products}
\end{equation}
These reactions form the OH loss terms in our box model. From these reactions and their kinetics, we can form a set of coupled differential equations:
\begin{eqnarray}
R_1 &= k_1 [OH] [CH_4] \\
R_2 &= k_2 [OH] [CO] \\
R_3 &= k_3 [OH] [X] \\
\frac{d[CH_4]}{dt} &= S_{CH_4} - R_1 - \frac{\Delta[CH_4]}{\tau}\\
\frac{d[CO]}{dt} &= S_{CO} + R_1 - R_2 - \frac{\Delta[CO]}{\tau}\\
\frac{d[OH]}{dt} &= S_{OH} - R_1 - R_2 - R_3 - \frac{\Delta[OH]}{\tau}
\end{eqnarray}
Above, $\Delta[c]$ is the difference in the concentration of species $c$ between the Northern and Southern Hemispheres, $\tau$ is the inter-hemispheric exchange timescale ($\sim$1 yr), and hence $\Delta[c]/\tau$ is the inter-hemispheric transport term. Finally, $S_c$ is the source term, which represents the emission of the species $c$ and, in the case of OH, represents the production rate. These equations are computed in our forward model.

As a test of our forward model, we run the model with prescribed emissions. We then add a 50\% instantaneous perturbation to methane emissions and observe the behavior of the system with interactive and non-interactive OH chemistry (Fig. \ref{forward_model}). The perturbation in the non-interactive chemistry model decays with a $\sim$9 yr lifetime, while in the interactive chemistry model it decays with a $\sim$13 yr lifetime. This is expected \citep{prather_time_1996, prather1994lifetimes} and indicates that our forward model is accurately approximating the chemical system to first order.

\begin{figure}
\begin{center}
\includegraphics[width=0.5\textwidth]{forward_model_test.eps}
\end{center}
\caption{Forward model test: response of the system to a 50\% instantaneous perturbation of methane emissions, with interactive and non-interactive OH chemistry.}
\label{forward_model}
\end{figure}

%% Maybe have the MCF equation instead of OH
%------------------------------------------------

\section{Methods}

\subsection{Data and Inversion}

Our box model (Section 2) maps emissions to concentrations and thus, by inverting our model, we map concentrations to emissions. Emissions are estimated using a nonlinear Bayesian inversion method, the Levenberg-Marquardt algorithm. We use observations of methane (NOAA), carbon monoxide (NOAA), and methyl chloroform (NOAA, GAGE/AGAGE) concentrations. Concentration measurements began in 1983 and continue into the present day.
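To make the structure of the forward model concrete, the short sketch below integrates the coupled two-hemisphere box model of Section 2 in Python (assuming the NumPy and SciPy libraries); the rate constants, source terms, and initial concentrations shown are placeholders for illustration only and are not the values used in our inversions, where the hemispheric sources are instead the quantities adjusted by the Levenberg-Marquardt algorithm.

\begin{verbatim}
import numpy as np
from scipy.integrate import solve_ivp

# Illustrative two-hemisphere box model (cf. the coupled equations in Section 2).
# k1, k2, k3, tau, the sources S and the initial state are placeholders.
k1, k2, k3, tau, X = 1e-3, 2e-3, 1e-3, 1.0, 1.0
S = {"CH4": (300.0, 260.0), "CO": (700.0, 500.0), "OH": (40.0, 40.0)}

def box_model(t, y):
    # y = [CH4, CO, OH] for the Northern, then the Southern Hemisphere
    dydt = np.zeros(6)
    delta = y[0:3] - y[3:6]             # Delta[c] = [c]_N - [c]_S
    for i, sign in ((0, -1.0), (3, +1.0)):
        ch4, co, oh = y[i:i + 3]
        R1, R2, R3 = k1 * oh * ch4, k2 * oh * co, k3 * oh * X
        h = i // 3                      # 0 = NH sources, 1 = SH sources
        dydt[i]     = S["CH4"][h] - R1           + sign * delta[0] / tau
        dydt[i + 1] = S["CO"][h]  + R1 - R2      + sign * delta[1] / tau
        dydt[i + 2] = S["OH"][h]  - R1 - R2 - R3 + sign * delta[2] / tau
    return dydt

sol = solve_ivp(box_model, (0.0, 30.0), [1700.0, 90.0, 1.0, 1650.0, 80.0, 1.0])
\end{verbatim}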
For our inversions, we take hemispheric averages by bootstrapping the deseasonalized data. These observations are combined by block averaging in 1 yr windows to form a timeseries. This process is repeated 50 times so that the mean and variance of these timeseries can be calculated. The data are then input into our inversion, and emissions are derived by minimizing the cost function with our box model as the forward model.

\subsection{Simulations}

We first perform an idealized inversion test by globally prescribing fixed emissions until an arbitrary year and then increasing emissions by 20 Tg/yr in our forward model with interactive OH chemistry. The resulting concentrations are inverted for emissions in two different runs where we A) assume non-interactive OH chemistry and B) assume interactive OH chemistry. This test serves two purposes: 1) we can test the performance of our inversion, and 2) we can calculate the error when concentrations, calculated with interactive OH chemistry, are inverted for emissions while neglecting interactive OH chemistry. This is equivalent to computing the forward model error of using a simple fixed OH concentration in atmospheric methane inversions, which is common practice.

From the results of our Synthetic Emissions Test (Fig. \ref{synthetic_emissions}), we find that the emissions recovered by the inversion are in close agreement with the prescribed emissions. This implies our inversion is accurately solving for emissions despite the added complexity of the forward model. However, the error of the inversion without interactive OH chemistry begins changing sign when the prescribed emissions spike begins (Fig. \ref{synthetic_emissions}). This is because increased methane emissions decrease hydroxyl concentrations. The non-interactive OH inversion does not account for this OH concentration response, resulting in an underestimation in an atmosphere with higher OH concentrations and an overestimation in an atmosphere with lower OH concentrations. This is discussed further in Section 4.

\begin{figure*}
\begin{center}
\includegraphics[width=\textwidth]{synthetic_emissions_test.eps}
\end{center}
\caption{A test of our inversion. We prescribe fixed emissions and then increase them by 20 Tg/yr. We then run our inversion with interactive and non-interactive OH chemistry.}
\label{synthetic_emissions}
\end{figure*}

In addition, we run our inversions with increasing levels of complexity in order to obtain the biases associated with including (excluding) interactive OH chemistry and carbon monoxide in emissions estimates. Table \ref{model_setup} shows the model runs. In our experiments, Case 1, our simplest run, assumes fixed OH concentrations (non-interactive chemistry). Case 2 assumes fixed CO and OH sources with interactive chemistry, while Case 3 accounts for the temporal variability of CO sources. Case 4 incorporates the variability of OH sources without variable CO sources, while Case 5 accounts for the temporal variability of CO sources with variable OH sources. The results of these runs are plotted in Fig. \ref{all_runs}.

\begin{table}
\caption{Our simulations vary in complexity.
Below are the assumptions in each model run.}
\label{model_setup}
\begin{tabular}{|c|c|c|c|}
\hline
Case & Interactive OH & Inverting with CO & Inverting for OH Source \\
\hline
1 & no & no & no \\
2 & yes & no & no \\
3 & yes & yes & no \\
4 & yes & no & yes \\
5 & yes & yes & yes \\
\hline
\end{tabular}
\end{table}

\begin{figure*}
\begin{center}
\includegraphics[width=\textwidth]{case_emissions.eps}
\end{center}
\caption{Plotted are the results from our inversion for all five cases in Table 1. Calculated methane emissions are in the top left panel, CO emissions are in the top right panel, and OH concentrations are in the bottom left panel.}
\label{all_runs}
\end{figure*}

%------------------------------------------------

\section{Discussion}

In this study, we estimate global methane emissions in a two-hemisphere box model and quantify the biases associated with critical assumptions in decadal methane emissions estimates between 1980 and 2016 (Table 1). When excluding interactive OH chemistry (fig. xx), our results indicate a bias that depends on the availability of OH radicals in the atmosphere. When not accounting for interactive OH chemistry, higher OH concentrations result in a negative bias and low OH concentrations result in a positive bias. This can be seen in Fig. xx, where increased methane emissions at the turn of the century decrease the amount of OH radicals, leading to an overestimation of emissions (Fig. \ref{OH_bias}). In Case 1, with fixed OH concentrations, methane emissions increase by 6.6\% between 1996 and 2016. In Case 2, methane emissions increase by 6.9\% while global mean OH concentrations decrease by 0.95\%.

\begin{figure*}
\begin{center}
\includegraphics[width=\textwidth]{OH_variability_bias.eps}
\end{center}
\caption{Inversion results from ignoring interactive OH chemistry (Case 1) and from accounting for interactive OH chemistry while assuming fixed CO emissions (Case 2). The left panel shows calculated emissions from 1980 to 2016. The right panel shows the difference in $CH_4$ emissions under the assumptions outlined in Table 1.}
\label{OH_bias}
\end{figure*}

When CO emissions were increasing before the 1990s, the increased emissions caused a decrease in OH concentrations and a reduced estimate of methane emissions. However, with the decrease in CO emissions beginning in the 1990s, we see an increased availability of OH radicals to oxidize methane; the resulting decrease in methane lifetime leads to increasing methane emissions to match observations (Figs. \ref{CO_bias} and \ref{all_runs}). In Case 3, global methane emissions increase by 7.4\% while OH concentrations decrease by 0.5\% between 1996 and 2016. This indicates that, in terms of OH loss, the reduction in CO more than compensates for the increased methane emissions.

\begin{figure*}
\begin{center}
\includegraphics[width=\textwidth]{CO_bias.eps}
\end{center}
\caption{Plotted are the results from our inversion for Cases 1--3. Methane emissions are in the left panel and CO concentrations are in the right panel.}
\label{CO_bias}
\end{figure*}

In Case 4 and Case 5, we additionally fit for variability in the OH production rate. Case 4 assumes constant CO emissions, while Case 5 accounts for the variability of CO emissions. The differences between methane emissions in the two cases are small. They also generally agree with the solution obtained by \citep{turner_ambiguity_2017}. In summary, we find that not accounting for interactive OH chemistry and for the variability of CO emissions biases the results of methane emissions estimates in opposing ways.
Methane lifetime depends on the concentration of the Hydroxyl Radical which, in turn, depends on the concentration of CO and CH4. In decadal Methane emissions estimates, we find a systematic negative bias in inversions that do not account for this chemical system. This is due to the decline of CO emissions starting in the 1990’s overcompensating for the loss of OH due to CH4. However, when allowing for a varying OH source term, our results match that of \citep{turner_ambiguity_2017} because of compensating OH production accounting for variabilities in OH concentrations due to interactive chemistry. We conclude that future work in estimating methane emissions in the decadal timescale should account for the interactive OH chemistry that takes place in the troposphere, and more work should be done to constrain trends in the concentration and production of Hydroxyl radicals due to its effect on the methane lifetime. %---------------------------------------------------------------------------------------- % ACKNOWLEDGEMENTS %---------------------------------------------------------------------------------------- \begin{acknowledgments} Many thanks to Christian Frankenberg, Alex Turner, and Yi Yin for the invaluable assistance in preparing this project. \end{acknowledgments} \bibliography{citations}{} \bibliographystyle{apalike} %---------------------------------------------------------------------------------------- % BIBLIOGRAPHY %---------------------------------------------------------------------------------------- % Either type in your references using % \begin{thebibliography}{} % \bibitem{} % Text % \end{thebibliography} % Or, % If you use BiBTeX for your references, please use the agufull08.bst file (available at % ftp://ftp.agu.org/journals/latex/journals/Manuscript-Preparation/) to produce your .bbl % file and copy the contents into your paper here. % Follow these steps: % 1. Run LaTeX on your LaTeX file. % 2. Make sure the bibliography style appears as \bibliographystyle{agufull08}. Run BiBTeX on your LaTeX % file. % 3. Open the new .bbl file containing the reference list and % copy all the contents into your LaTeX file here. % 4. Comment out the old \bibliographystyle and \bibliography commands. % 5. Run LaTeX on your new file before submitting. % AGU does not want a .bib or a .bbl file. Please copy in the contents of your .bbl file here. \end{article} %---------------------------------------------------------------------------------------- % FIGURES AND TABLES %---------------------------------------------------------------------------------------- %% Enter Figures and Tables here: % % DO NOT USE \psfrag or \subfigure commands. % % Figure captions go below the figure. % Table titles go above tables; all other caption information should be placed in footnotes below the table. % %---------------- % EXAMPLE FIGURE % % \begin{figure} % \noindent\includegraphics[width=20pc]{samplefigure.eps} % \caption{Caption text here} % \label{figure_label} % \end{figure} % % --------------- % EXAMPLE TABLE % %\begin{table} %\caption{Time of the Transition Between Phase 1 and Phase 2\tablenotemark{a}} %\centering %\begin{tabular}{l c} %\hline % Run & Time (min) \\ %\hline % $l1$ & 260 \\ % $l2$ & 300 \\ % $l3$ & 340 \\ % $h1$ & 270 \\ % $h2$ & 250 \\ % $h3$ & 380 \\ % $r1$ & 370 \\ % $r2$ & 390 \\ %\hline %\end{tabular} %\tablenotetext{a}{Footnote text here.} %\end{table} % See below for how to make sideways figures or tables. 
\newpage \end{document} % IF YOU HAVE MULTI-LINE EQUATIONS, PLEASE % BREAK THE EQUATIONS INTO TWO OR MORE LINES % OF SINGLE COLUMN WIDTH (20 pc, 8.3 cm) % using double backslashes (\\). % To create multiline equations, use the % \begin{eqnarray} and \end{eqnarray} environment % as demonstrated below. %If you don't want an equation number, use the star form: %\begin{eqnarray*}...\end{eqnarray*} % Break each line at a sign of operation % (+, -, etc.) if possible, with the sign of operation % on the new line. % Indent second and subsequent lines to align with the first character following the equal sign on the first line. % Use an \hspace{} command to insert horizontal space into your equation if necessary. Place an appropriate unit of measure between the curly braces, e.g. \hspace{1in}; you may have to experiment to achieve the correct amount of space. %% ------------------------------------------------------------------------ %% % % EQUATION NUMBERING: COUNTER % %% ------------------------------------------------------------------------ %% % You may change equation numbering by resetting % the equation counter or by explicitly numbering % an equation. % To explicitly number an equation, type \eqnum{} % (with the desired number between the brackets) % after the \begin{equation} or \begin{eqnarray} % command. The \eqnum{} command will affect only % the equation it appears with; LaTeX will number % any equations appearing later in the manuscript % according to the equation counter. % % If you have a multiline equation that needs only % one equation number, use a \nonumber command in % front of the double backslashes (\\) as shown in % the multiline equation above. %% ------------------------------------------------------------------------ %% % % SIDEWAYS FIGURE AND TABLE EXAMPLES % %% ------------------------------------------------------------------------ %% % % For tables and figures, add \usepackage{rotating} to the paper and add the rotating.sty file to the folder. % AGU prefers the use of {sidewaystable} over {landscapetable} as it causes fewer problems. % % \begin{sidewaysfigure} % \includegraphics[width=20pc]{samplefigure.eps} % \caption{caption here} % \label{label_here} % \end{sidewaysfigure} % % \begin{sidewaystable} % \caption{} % \begin{tabular} % Table layout here. % \end{tabular} % \end{sidewaystable}
{ "alphanum_fraction": 0.7188177874, "avg_line_length": 62.6411889597, "ext": "tex", "hexsha": "49e8b40dbd95aab1d83831fdb9aff92fad5abd91", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "5cac4557472a0650629f6b4f81e55e4ed642d41e", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Newton-Climate/box_model_paper", "max_forks_repo_path": "main.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "5cac4557472a0650629f6b4f81e55e4ed642d41e", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Newton-Climate/box_model_paper", "max_issues_repo_path": "main.tex", "max_line_length": 1880, "max_stars_count": null, "max_stars_repo_head_hexsha": "5cac4557472a0650629f6b4f81e55e4ed642d41e", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Newton-Climate/box_model_paper", "max_stars_repo_path": "main.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 6585, "size": 29504 }
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % UMB-CS110-2015S: Introduction to Computing % Copyright 2015 Pejman Ghorbanzade <[email protected]> % Creative Commons Attribution-ShareAlike 4.0 International License % More info: https://github.com/ghorbanzade/UMB-CS110-2015S %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \def \topDirectory {.} \def \texDirectory {\topDirectory/src/main/tex} \documentclass[12pt,letterpaper,twoside]{article} \usepackage{\texDirectory/template/style/directives} \usepackage{\texDirectory/template/style/assignment} \input{\texDirectory/template/config} \begin{document} \doc{title}{Solution to Assignment 5} \doc{date-pub}{Apr 19, 2015 at 00:00 AM} \doc{date-due}{Apr 30, 2015 at 05:30 PM} \doc{points}{8} \prepare{header} \section*{Question 1} Write a program \texttt{FlightTest.java} that controls two airplanes, one \texttt{plane1} a commercial plane from \texttt{Eagle} company and one \texttt{plane2}, a fighter-bomber from \texttt{Dragon} company. All airplanes fly at some altitude and with some speed. They take off and land. Just as bombers may drop bomb (if they have any) while they are in sky, commercials can board passengers (as much as their capacity allows) while grounded. Assuming all planes made by \texttt{Eagle} have 300 seats, fly at 39000 ft and 550 mph and all planes made by \texttt{Dragon} have 2 bomb bays and fly at 30000 ft and 1000 mph, develop classes \texttt{Aircraft}, \texttt{Bomber}, \texttt{Commercial}, \texttt{Eagle} and \texttt{Dragon} and use them in a program \texttt{FlightTest.java}. \section*{Solution} \begin{enumerate} \item Class \texttt{Aircraft.java} \lstset{language=java,tabsize=2} \begin{lstlisting} public abstract class Aircraft { // attributes private double flyAltitude; private double flySpeed; private double altitude; private double speed; private boolean grounded; // methods public void takeOff() { this.grounded = false; this.altitude = this.flyAltitude; this.speed = this.flySpeed; System.out.println("Plane took off."); } public void land() { this.grounded = true; this.altitude = 0; this.speed = 0; System.out.println("Plane landed."); } // constructor public Aircraft(double altitude, double speed) { this.flyAltitude = altitude; this.flySpeed = speed; this.land(); System.out.println("New Aircraft Made."); } // getters and setters public boolean isGrounded() { return grounded; } public double getAltitude() { return altitude; } public double getSpeed() { return speed; } } \end{lstlisting} \item Class \texttt{Bomber.java} \lstset{language=java,tabsize=2} \begin{lstlisting} public abstract class Bomber extends Aircraft { // attributes private int numBombs; // methods public void dropBomb() { if (!this.isGrounded() && numBombs > 0) { numBombs--; System.out.println("Bomb dropped."); } } // constructor public Bomber(double altitude, double speed, int bombs) { super(altitude, speed); this.numBombs = bombs; System.out.println("A new bomber!"); } // getters and setters public int getNumBombs() { return numBombs; } } \end{lstlisting} \item Class \texttt{Commercial.java} \lstset{language=java,tabsize=2} \begin{lstlisting} public abstract class Commercial extends Aircraft { // attributes private int numPassenger; // methods public void board(int num) { if (this.isGrounded()) { numPassenger += num; System.out.println(num + " passenger(s) aboard."); } } // constructor public Commercial(double altitude, double speed) { super(altitude, speed); numPassenger = 0; System.out.println("A new 
commercial!"); } // getters and setters public int getNumPassenger() { return numPassenger; } } \end{lstlisting} \item Class \texttt{Eagle.java} \lstset{language=java,tabsize=2} \begin{lstlisting} public class Eagle extends Commercial { // attributes private static int maxPassenger = 300; private static double altitude = 39000; private static double speed = 550; // methods public void board() { this.board(1); } public void board(int num) { if (this.getNumPassenger() + num < maxPassenger) { super.board(num); } } // constructor public Eagle() { super(altitude, speed); System.out.println("A new eagle!"); } // getters and setters } \end{lstlisting} \item Class \texttt{Dragon.java} \lstset{language=java,tabsize=2} \begin{lstlisting} public class Dragon extends Bomber { // attributes private static int maxNumBombs = 2; private static double altitude = 30000; private static double speed = 1000; // methods // constructor public Dragon() { super(altitude, speed, maxNumBombs); System.out.println("A new dragon!"); } // getters and setter } \end{lstlisting} \item Class \texttt{FlightTest.java} \lstset{language=java,tabsize=2} \begin{lstlisting} public class FlightTest { public static void main(String[] args) { // instantiate planes // For the commercial plane Eagle plane1 = new Eagle(); // when plane is created, it's grounded // and boarding 10 passengers is possible plane1.board(10); // if the plane takes off plane1.takeOff(); // we can't board passengers anymore plane1.board(10); // and there will only be 10 passengers aboard System.out.println("Num Passengers: "+plane1.getNumPassenger()); // it flies in 39000 ft System.out.println("Altitude: "+plane1.getAltitude()); // and its speed is 550 mph System.out.println("Speed: "+plane1.getSpeed()); // if the plane lands again plane1.land(); // its altitude becomes 0 System.out.println("Altitude: "+plane1.getAltitude()); // as well as its speed System.out.println("Speed: "+plane1.getSpeed()); // As for the bomber plane Dragon plane2 = new Dragon(); // when plane is created, it's grounded // and loaded with 2 bombs System.out.println("Num Bombs: "+plane2.getNumBombs()); // but it cannot drop bomb while grounded plane2.dropBomb(); // if the plane takes off plane2.takeOff(); // it can drop bombs plane2.dropBomb(); // until its bomb bays are empty plane2.dropBomb(); // that's when it cannot drop bomb anymore plane2.dropBomb(); // it flies at 30000 ft System.out.println("Altitude: "+plane2.getAltitude()); // and its speed is 1000 mph System.out.println("Speed: "+plane2.getSpeed()); // until it lands plane2.land(); // its altitude becomes 0 System.out.println("Altitude: "+plane1.getAltitude()); // as well as its speed System.out.println("Speed: "+plane1.getSpeed()); } } \end{lstlisting} \end{enumerate} \section*{Question 2} Your manager has just learned basic concepts of interfaces and abstraction in Java and has asked you to implement the methods in the following interface in a class \texttt{ArraySort.java}. Write a program \texttt{ArraySortTest.java} to show him how to use your class to initialize, shuffle and sort an array. Attach a file \texttt{note.txt} to briefly explain why this is not a good practice and recommend a better alternative. 
\lstset{language=java,tabsize=2} \begin{lstlisting} public interface sortAndUnsort { // sorts the array using insertion sort public int[] sortInsertion(int[] array); // shuffles the array public int[] shuffle(int[] array); // prints the array elements public void print(int[] array); // initialize array from firstElement to lastElement public int[] init(int firstElement, int lastElement); } \end{lstlisting} \section*{Solution} \begin{enumerate} \item Class \texttt{ArraySort.java} \lstset{language=java, tabsize=2} \begin{lstlisting} public class ArraySort implements sortAndUnsort { // sorts the array using insertion sort @Override public int[] sortInsertion(int[] array) { for (int i = 0; i < array.length; i++) { int minIndex = i; int min = array[i]; for (int j = i; j < array.length; j++) { if (array[j] < min) { min = array[j]; minIndex = j; } } if (i != minIndex) { int temp = array[i]; array[i] = array[minIndex]; array[minIndex] = temp; } } return array; } // shuffles the array @Override public int[] shuffle(int[] array) { for (int i = 0; i < array.length; i++) { int j = (int) (i + (array.length - i)*Math.random()); int temp = array[j]; array[j] = array[i]; array[i] = temp; } return array; } // prints the array elements @Override public void print(int[] array) { for (int i = 0; i < array.length; i++) { System.out.printf("%2d ", array[i]); } System.out.println(); } // initialize array from firstElement to lastElement @Override public int[] init(int firstElement, int lastElement) { int[] array = new int[lastElement - firstElement + 1]; for (int i = 0; i < array.length; i++) { array[i] = firstElement + i; } return array; } } \end{lstlisting} \item Class \texttt{ArraySortTest.java} \lstset{language=java,tabsize=2} \begin{lstlisting} public class ArraySortTest { public static void main(String[] args) { ArraySort instance = new ArraySort(); // initialize array from 1 to 10 int[] array = instance.init(1, 10); // prints the initialized array instance.print(array); // shuffles the array array = instance.shuffle(array); // prints the shuffled array instance.print(array); // sorts array using insertion sort array = instance.sortInsertion(array); // prints the sorted array instance.print(array); } } \end{lstlisting} The disadvantage of using an interface for the proposed objective is that given methods should be implemented in a separate class. Calling these methods require instantiation from \texttt{ArraySort} class. One possible alternative is to develop all desired methods as static methods of normal class \texttt{ArraySort.java} whose constructor is made private, eliminating the need for an interface. This way, the following code snippet can replace the previous \texttt{ArraySortTest.java} file. \lstset{language=java,tabsize=2} \begin{lstlisting} public class ArraySortTestV2 { public static void main(String[] args) { // initialize array from 1 to 10 int[] array = ArraySort.init(1, 10); // prints the initialized array ArraySort.print(array); // shuffles the array array = ArraySort.shuffle(array); // prints the shuffled array ArraySort.print(array); // sorts array using insertion sort array = ArraySort.sortInsertion(array); // prints the sorted array ArraySort.print(array); } } \end{lstlisting} \end{enumerate} \end{document}
{ "alphanum_fraction": 0.7031993906, "avg_line_length": 29.0110497238, "ext": "tex", "hexsha": "d0194f95409b4c2dd663ca02dbcf53852f542867", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "b12ded95ddec71cd45dd05dff773018f6879d37f", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "UMB-CS110-2015S/Assignments", "max_forks_repo_path": "src/main/tex/assignments/hw05s.tex", "max_issues_count": 7, "max_issues_repo_head_hexsha": "b12ded95ddec71cd45dd05dff773018f6879d37f", "max_issues_repo_issues_event_max_datetime": "2019-03-17T16:39:11.000Z", "max_issues_repo_issues_event_min_datetime": "2015-08-22T15:44:45.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "UMB-CS110-2015S/Assignments", "max_issues_repo_path": "src/main/tex/assignments/hw05s.tex", "max_line_length": 492, "max_stars_count": 1, "max_stars_repo_head_hexsha": "b12ded95ddec71cd45dd05dff773018f6879d37f", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "UMB-CS110-2015S/Assignments", "max_stars_repo_path": "src/main/tex/assignments/hw05s.tex", "max_stars_repo_stars_event_max_datetime": "2020-05-03T18:41:40.000Z", "max_stars_repo_stars_event_min_datetime": "2020-05-03T18:41:40.000Z", "num_tokens": 2877, "size": 10502 }
\section{Introduction}
This paper attempts to summarise some important definitions and theorems in the theory of dynamical systems (DS). We only presume basic knowledge of topology and analysis. When we say 'time interval' we simply refer to a connected open subset $U$ of $\rz$ and 'time' to an element in $U$.

\subsection{Background}
Let $(X,d)$ be a metric space, that is, there exists a map
$$d : X \times X \longrightarrow \rz,$$
such that $d(x,y) \geq 0$ and $d(x,y) = d(y,x)$ for all $x, y \in X$, $d(x,y) = 0$ if and only if $x = y$, and
$$d(x,z) \leq d(x,y) + d(y,z),\ \forall x, y, z \in X.$$

\subsubsection{Dynamical systems}
Given a metric space $(X,d)$ and a map $f : X \longrightarrow X$, a time-discrete dynamical system (TD) is a map
$$\phi : \nz \times X \longrightarrow X, (n,x) \longmapsto f^n (x) := \underbrace{f \circ \ldots \circ f}_{n-\mathrm{times}}(x)$$
inducing a sequence $(f^n(x))_{n \in \nz} \subset X$ for all $x \in X$ and $n \geq 0$. A time-continuous dynamical system (TC) is a map
$$\varphi : \rz \times X \longrightarrow X,$$
such that $\varphi(0,x) = x$ and $\varphi(t + s,x) = \varphi(s,\varphi(t,x)) = \varphi(t,\varphi(s,x))$ for all $t, s \in U \subset \rz$ open and $x \in X$. We denote a TC as
$$(X, d, \varphi, U).$$
We remark that given a time-continuous dynamical system $(X,d,\varphi,U)$ we get a family of maps $\{f_t : X \longrightarrow X\}_{t \in U}$ indexed by time such that $f_0 = id_X$ and $\varphi(t,x) = f_t(x)$ for all $x \in X$ and $t \in U$, as well as
$$f_s(f_t(x)) = f_t(f_s(x)).$$
Furthermore, we get a time-discrete DS when we restrict the domain accordingly. What this entails will be discussed later.\\
\indent In addition, we restrict the time domain to an open subset as there are examples of smooth TCs which do not exist for all times:
$$\dot{x} = x^2 \Rightarrow \varphi(t,x_0) = \frac{x_0}{1 - x_0(t - t_0)}$$
has no smooth solution $x \in C^\infty(\rz)$ for all initial conditions $(t_0,x_0) \in U \times X$ (only a locally smooth solution, since for $x_0 > 0$ the solution blows up at $t = t_0 + 1/x_0$).
\begin{defi}
Let $(X, d, \varphi, U)$ be a TC and $x_0 \in X$. We call a map $x : U \longrightarrow X$ a solution if $x(t) = \varphi(t,x_0)$ for all $t \in U$. If $U = \rz$ we call $x \in X^U$ a global solution.
\end{defi}

\subsubsection{Attractors and basin of attraction}
Until now, we did not need the metric $d$. However, the next definition requires this map. Moreover, let $X$ be complete with respect to the metric-induced topology.
\begin{defi}[Metric attractors]
Let $(X, d, \varphi, U)$ be a TC. A subset $A \subset X$ is called an attractor if there exists an open neighborhood $V \supset A$ such that
$$\forall \varepsilon > 0\ \exists T \in U\ \forall t \geq T:\ d(\varphi(t,x),A) < \varepsilon,$$
for all $x \in V$. This property is denoted as usual as a limit:
$$A := \{x \in V : \lim_{t \rightarrow \infty} \varphi(t,x) \in A\}$$
%We remark that not all metric spaces do have a limit, i.e. $\varphi
We call the union of all neighborhoods $V \supset A$, such that
$$\lim_{t \rightarrow \infty}\varphi(t,x) \in A$$
the basin of attraction, denoted by $B(A)$.
\end{defi}
Again, strictly speaking we only need a (complete) topological space $(X,\tau)$ with a neighborhood basis $\beta(x)$ for each $x \in X$:
\begin{defi}[Topological attractor]
Let $(X, \tau, \varphi, U)$ be a TC. A subset $A \subset X$ is called an attractor if there exists an open neighborhood $V \supset A$ such that
$$\forall W(A) \in \beta(A)\ \exists T \in U\ \forall t \geq T:\ \varphi(t,x) \in W(A),$$
for all $x \in V$.
We call the union of all neighborhoods $V \supset A$, such that $$\lim_{t \rightarrow \infty}\varphi(t,x) \in A$$ the basin of attraction, denoted by $B(A)$. \end{defi} Here, $$W(A) = \bigcup_{x \in A} W(x),\ \beta(A) = \bigcup_{x \in A} \beta(x)\ \mathrm{where}\ W(x) \in \beta(x)\ \forall x \in X.$$
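To illustrate these definitions with a standard example, consider $X = \rz$ with the usual metric and the TC
$$\varphi(t,x) = e^{-t}x,$$
i.e. the flow of $\dot{x} = -x$ with $U = \rz$. Then $A = \{0\}$ is an attractor: for any bounded open neighborhood $V \supset A$ and any $\varepsilon > 0$ we may choose $T$ such that $e^{-T}\sup_{y \in V}|y| < \varepsilon$, so that $d(\varphi(t,x),A) = e^{-t}|x| < \varepsilon$ for all $t \geq T$ and all $x \in V$. Since every point of $\rz$ lies in such a neighborhood, the basin of attraction is $B(A) = \rz$.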
{ "alphanum_fraction": 0.6681649921, "avg_line_length": 84.0444444444, "ext": "tex", "hexsha": "556d6bed6a30471456990effa101b3f40d5d61d0", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "1a3fab54f2e03d9ce656f9b8a5b58e26c3c93a02", "max_forks_repo_licenses": [ "Unlicense" ], "max_forks_repo_name": "gmuel/texlib", "max_forks_repo_path": "dynamic/intro.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "1a3fab54f2e03d9ce656f9b8a5b58e26c3c93a02", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Unlicense" ], "max_issues_repo_name": "gmuel/texlib", "max_issues_repo_path": "dynamic/intro.tex", "max_line_length": 283, "max_stars_count": null, "max_stars_repo_head_hexsha": "1a3fab54f2e03d9ce656f9b8a5b58e26c3c93a02", "max_stars_repo_licenses": [ "Unlicense" ], "max_stars_repo_name": "gmuel/texlib", "max_stars_repo_path": "dynamic/intro.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1289, "size": 3782 }
% !TEX root =../thesis-letomes.tex
\chapter{Physical Modelling}

We will model two systems:
\begin{enumerate}
\item 2D simulator for the Earth-Moon system (cartesian coordinates, center-of-mass-centered).
\item 3D simulator for the Sun-Earth-Mars system (spherical coordinates, heliocentric).
\end{enumerate}
We will derive equations of motion using Hamiltonian mechanics, briefly describe symplectic integrators, and integrate the equations of motion using symplectic integrators to obtain numerical algorithms. Finally, we will describe the complexities of Earth-Mars transfer orbits and a simplified Earth-Mars transfer orbit using the patched conic approximation.

\section{Analytical Mechanics}

Newton's 2nd Law is a very well-known physical law of classical mechanics governing all bodies having mass. It can be stated as \cite{Knudsen2002}
\begin{align}
\label{eq:newton2}
\textbf{Newton's 2nd Law} \qquad \sum{\vec{F}} = \dfrac{d \vec{p}}{dt}.
\end{align}
where the left-hand side is the total force on the body and the right-hand side is the time derivative of the momentum vector, defined as mass times the velocity vector \(\vec{p} = m \vec{v}\).

However, we will use the more powerful Hamiltonian formalism from the field of analytical mechanics, which is \cite{Knudsen2002}
\begin{align}
\textbf{Generalized momenta} \qquad p_i(\vec{q},\dot{\vec{q}}) &= \dfrac{\partial L}{\partial \dot{q_i}}, \qquad i = 1,\cdots, n \ , \label{eq:generalized-momenta} \\[1cm]
\textbf{Hamiltonian} \qquad H(\vec{q}, \vec{p}, t) &= \sum\limits_{i=1}^n p_i \dot{q_i} - L \label{eq:hamiltonian}, \\[1cm]
\textbf{Hamilton's equations} \qquad
\begin{split}
\label{eq:hamiltons-equations}
\dot{q_i} &= +\dfrac{\partial H}{\partial p_i} , \\[0.2cm]
\dot{p_i} &= -\dfrac{\partial H}{\partial q_i}.
\end{split}
\end{align}
where $q_i$ and $p_i$ are the coordinates and the generalized momenta respectively, one for each degree of freedom, $i$ is the index of the degrees of freedom (e.g. $x, y$ for 2D cartesian coordinates or $r, \theta, \phi$ for 3D spherical coordinates), $n$ is the total number of degrees of freedom, $\dot{q}$ is the time derivative of $q$, and $L = T - V$ is the Lagrangian, with $T$ being the system's kinetic energy and $V$ its potential energy. The Hamiltonian, \cref{eq:hamiltonian}, corresponds to the total mechanical energy of the system\footnote{For most physical systems, with some exceptions; see \cref{ch:HvsE} for details}, and \cref{eq:hamiltons-equations} constitutes the equations of motion, which, when solved, give the solutions $\vec{q}(t)$, $\vec{p}(t)$.

The reason for using Hamilton's equations instead of Newton's is primarily that they yield $2 n$ first-order differential equations of motion for $n$ degrees of freedom (dimensions), whereas Newton's 2nd Law yields $n$ second-order differential equations. As it turns out, first-order differential equations are more straightforward to discretize, i.e. turn into a numerical algorithm that can be typed into a computer program. For a guide on how to use Hamilton's formalism in practice, see \cref{apx:using-hamilton-mechanics}.

\section{Numerical Algorithms}

Once we have derived our equations of motion in the form of Hamilton's equations \cref{eq:hamiltons-equations}, we need to solve them. In 1887 and 1889, the mathematicians Heinrich Bruns and Henri Poincaré showed that there is no general analytical solution to the three-body problem given by simple algebraic expressions and integrals \cite{Gowers2008}.
This means we have to solve them numerically to get the time evolution of the coordinates and velocities for particular initial conditions and delta-v impulses.

\subsection{Symplectic Integrators}

Symplectic integrators are a class of integrators typically used for Hamiltonian systems that have the property that they preserve the energy very well over long times. Informally, ``\textit{a symplectic map is a map which preserves the sum of areas projected onto the set of $(p_i,q_i)$ planes. It is the generalization of an area-preserving map}'' \cite{Weisstein}, as illustrated in \cref{fig:symplectic-area}.

\begin{figure}[ht]
\centering
\includegraphics[width=0.90\linewidth]{fig/symplectic-area.png}
\caption{Area preservation in phase space (corresponding to preservation of the Hamiltonian, $H$), for various symplectic numerical methods for a pendulum (Image: \cite{Hairer})}
\label{fig:symplectic-area}
\end{figure}

Thus symplectic algorithms have been employed in fields such as molecular dynamics and astronomy. ``\textit{Symplectic methods belong to the larger class of geometric numerical integration algorithms. These algorithms are constructed so that they preserve certain geometrical properties inherent in the system. Symplectic methods are so named because, when applied to problems in Hamiltonian mechanics, the algorithms preserve the linear symplectic structure inherent in the phase space representation of the dynamics.}'' \cite{Donnelly2005}. We therefore only use symplectic integrators for our equations.

\subsubsection{Symplectic Euler (1st Order Method)}

In the explicit Euler method, all quantities in the Hamiltonian derivatives on the right-hand side refer to the old time step $i$ (index 0). In the implicit Euler method, all quantities on the RHS refer to the new time step $i+1$ (index 1), which results in implicit equations that can typically only be solved numerically. The 1st order symplectic Euler is a mixture of the explicit and implicit Euler \cite{Hairer}:
\begin{equation}
\begin{split}
\label{eq:symplectic-euler1}
\vec{q}_1 = \vec{q}_0 + h \pd{H}{\vec{p}}(q_0, p_1) \\
\vec{p}_1 = \vec{p}_0 - h \pd{H}{\vec{q}}(q_0, p_1)
\end{split}
\end{equation}
or
\begin{equation}
\begin{split}
\label{eq:symplectic-euler2}
\vec{q}_1 = \vec{q}_0 + h \pd{H}{\vec{p}}(q_1, p_0) \\
\vec{p}_1 = \vec{p}_0 - h \pd{H}{\vec{q}}(q_1, p_0)
\end{split}
\end{equation}

\subsubsection{Symplectic Störmer-Verlet (2nd Order Method)}

The 2nd order symplectic Störmer-Verlet (henceforth simply called Verlet) is given by \cite{Hairer}:
\begin{equation}
\begin{split}
\label{eq:symplectic-verlet1}
\vec{p}_{1/2} &= \vec{p}_0 - \frac{h}{2} \pd{H}{\vec{q}}(q_0, p_{1/2}) \\
\vec{q}_1 &= \vec{q}_0 + \frac{h}{2} \left( \pd{H}{\vec{p}}(q_0, p_{1/2}) + \pd{H}{\vec{p}}(q_1, p_{1/2}) \right) \\
\vec{p}_{1} &= \vec{p}_{1/2} - \frac{h}{2} \pd{H}{\vec{q}}(q_1, p_{1/2})
\end{split}
\end{equation}
or
\begin{equation}
\begin{split}
\label{eq:symplectic-verlet2}
\vec{q}_{1/2} &= \vec{q}_0 + \frac{h}{2} \pd{H}{\vec{p}}(q_{1/2}, p_0) \\
\vec{p}_1 &= \vec{p}_0 - \frac{h}{2} \left( \pd{H}{\vec{q}}(q_{1/2}, p_0) + \pd{H}{\vec{q}}(q_{1/2}, p_1) \right) \\
\vec{q}_{1} &= \vec{q}_{1/2} + \frac{h}{2} \pd{H}{\vec{p}}(q_{1/2}, p_1)
\end{split}
\end{equation}

\section{CM-Centered Restricted 3-Body System (C-R3B), Earth-Moon}

The center-of-mass-centered 3-body system (C-R3B) with the Earth and Moon was explored in \cite{Saxe2015}; refer to this for detailed derivations of C-R3B.
We will present the final nondimensionalized equations of motion and numerical integration algorithms for reference. The model is set up as in \cref{fig:r3b}.

\begin{figure}[ht!]
\centering
\includegraphics[scale=0.75]{fig/r3b.pdf}
\caption{Restricted three-body problem. Two coordinate systems, both with the center of mass as origin: $(\mathscr{X},\mathscr{Y})$ is a stationary inertial frame, $(x,y)$ is a co-rotating non-inertial frame that rotates with the Moon at angular frequency $\omega$. $M =$ mass of Earth, $m =$ mass of Moon, $\vec{R} =$ vector from CM to Earth, $\vec{r} =$ vector from CM to Moon, $m_s =$ mass of spacecraft, $r_{S,E} =$ distance from spacecraft to Earth, $r_{S,M} =$ distance from spacecraft to Moon. In the dimensionless variables we introduce later, unit distance is $R+r$, which makes the dimensionless constant $k = \text{the CM-Earth distance} = \dfrac{R}{R+r}$ and $1-k = \text{the CM-Moon distance} = \dfrac{r}{R+r}$ (Image: \cite{Saxe2015}).}
\label{fig:r3b}
\end{figure}

\subsection{C-R3B Equations of Motion}

\begin{empheq}[box=\widefbox]{align}
\label{eq:Xdot}
\dot{X} = P_X + Y
\end{empheq}
\begin{empheq}[box=\widefbox]{align}
\label{eq:Ydot}
\dot{Y} = P_Y - X
\end{empheq}
\begin{empheq}[box=\widefbox]{align}
\label{eq:Pdot_X}
\dot{P}_X = P_Y - \dfrac{(1-k)(X+k)}{((X+k)^2+Y^2)^{3/2}} - \dfrac{k(X-1+k)}{((X-1+k)^2+Y^2)^{3/2}}
\end{empheq}
\begin{empheq}[box=\widefbox]{align}
\label{eq:Pdot_Y}
\dot{P}_Y = -P_X - \dfrac{(1-k)Y}{((X+k)^2+Y^2)^{3/2}} - \dfrac{k Y}{((X-1+k)^2+Y^2)^{3/2}}
\end{empheq}
where \(T\), \(X\), \(Y\), \(P_X\) and \(P_Y\) are our dimensionless variables for time, positions and generalized momenta, and \(k=\dfrac{M_{\Moon}}{M_{\Earth} + M_{\Moon}}\) is the ratio of the Moon's mass to the total Earth and Moon mass.

\subsection{C-R3B Numerical Algorithms}

\subsubsection{C-R3B Symplectic Euler}

We refer to \cite{Saxe2015} for detailed derivations. The equations of motion of the other system, in the next section, will be derived in detail. Based on \cref{eq:Xdot,eq:Ydot,eq:Pdot_X,eq:Pdot_Y}, a symplectic Euler algorithm is found:
\begin{align}
X_1 &= \dfrac{X_0 + h (h P_{Y,0} + P_{X,0} + Y_0)}{1+h^2}, \\[0.4cm]
Y_1 &= Y_0 + h (P_{Y,0} - X_1),
\label{eq:symplectic-euler-Y_1}
\end{align}
\begin{align}
\begin{aligned}
P_{X,1} &= P_{X,0} \\
&+ h \left(P_{Y,0} - \dfrac{(1-k)(k+X_1)}{((k+X_1)^2+Y_1^2)^{3/2}} - \dfrac{k(X_1-1+k)}{((X_1-1+k)^2+Y_1^2)^{3/2}}\right),
\label{eq:symplectic-euler-PX_1}
\end{aligned}
\\[0.4cm]
\begin{aligned}
P_{Y,1} &= P_{Y,0} \\
&+ h \left(-P_{X,0} - \dfrac{(1-k)Y_1}{((k+X_1)^2+Y_1^2)^{3/2}} - \dfrac{k Y_1}{((X_1-1+k)^2+Y_1^2)^{3/2}}\right),
\label{eq:symplectic-euler-PY_1}
\end{aligned}
\end{align}
where \(h\) is the step size, index \(0\) designates the previous time step ($i$) and index \(1\) designates the new time step ($i+1$). The algorithm is run in the same order as listed above.
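For concreteness, the following minimal Python sketch performs one such update step; it simply mirrors \cref{eq:symplectic-euler-Y_1,eq:symplectic-euler-PX_1,eq:symplectic-euler-PY_1} and is not necessarily identical to the production simulator code.

\begin{verbatim}
# One symplectic Euler step of the C-R3B equations (dimensionless units).
# k is the Moon/(Earth+Moon) mass ratio (about 0.012), h is the step size.
def cr3b_euler_step(X0, Y0, PX0, PY0, h, k):
    X1 = (X0 + h * (h * PY0 + PX0 + Y0)) / (1.0 + h * h)
    Y1 = Y0 + h * (PY0 - X1)
    # cubed distances to the Earth at (-k, 0) and the Moon at (1 - k, 0)
    r1_3 = ((X1 + k) ** 2 + Y1 ** 2) ** 1.5
    r2_3 = ((X1 - 1.0 + k) ** 2 + Y1 ** 2) ** 1.5
    PX1 = PX0 + h * (PY0 - (1.0 - k) * (X1 + k) / r1_3
                     - k * (X1 - 1.0 + k) / r2_3)
    PY1 = PY0 + h * (-PX0 - (1.0 - k) * Y1 / r1_3 - k * Y1 / r2_3)
    return X1, Y1, PX1, PY1
\end{verbatim}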
\subsubsection{C-R3B Symplectic Störmer-Verlet}

Based on \cref{eq:Xdot,eq:Ydot,eq:Pdot_X,eq:Pdot_Y}, a symplectic Störmer-Verlet algorithm was also found (for a detailed derivation, see \cite{Saxe2015}):
\begin{align}
X_{1/2} &= \frac{h^2 P_{Y,0} + 2 h \dot{X}_0 + 4 X_0}{4 + h^2} \label{eq:verlet-x_1/2} \\
Y_{1/2} &= Y_0 + \dfrac{h}{2} (P_{Y,0} - X_{1/2}), \label{eq:verlet-y_1/2} \\
P_{X,1} &= \frac{h^2 (2 \dot{P}_{Y,0} + P_{X,0}) + 4 h \dot{P}_{X,0} + 4 P_{X,0} }{4 + h^2} \label{eq:verlet-px_1} \\
P_{Y,1} &= P_{Y,0} + \dfrac{h}{2} \left[\dot{P}_Y(q_{1/2},p_0) + \dot{P}_Y(q_{1/2},p_1) \right], \label{eq:verlet-py_1} \\
X_1 &= X_{1/2} + \dfrac{h}{2} (P_{X,1} + Y_{1/2}), \label{eq:verlet-x_1} \\
Y_1 &= Y_{1/2} + \dfrac{h}{2} (P_{Y,1} - X_{1/2}), \label{eq:verlet-y_1}
\end{align}
where \(\dot{X}\), \(\dot{P}_X\) and \(\dot{P}_Y\) are \cref{eq:Xdot,eq:Pdot_X,eq:Pdot_Y}, \(h\) is the step size, index \(0\) designates the previous time step ($i$) and \(1\) designates the new time step ($i+1$). The algorithm is run in the same order as listed above.

\subsubsection{Adaptive Timesteps}

Furthermore, the Verlet algorithm is made to take adaptive time steps. In regions with relatively large accelerations, the step size is decreased, and vice versa. The idea is to take both an Euler and a Verlet step, and estimate the error made in the step as the difference between the two. Let $z$ denote a vector of the position variables $(X,Y)$.
\begin{align}
\text{Euler step result:} \qquad z_1 &= z + O(h), \\
\text{Verlet step result:} \qquad z_2 &= z + O(h^2),
\end{align}
where $O(h)$ denotes an error term of order $h$. Then we take the difference
\begin{align}
\|z_1 - z_2\| &= O(h) - O(h^2) \\
&\approx O(h)\ ,
\end{align}
since $O(h) \gg O(h^2)$. Thus we approximate the difference between the Euler and Verlet results as the actual error we make at step size $h$. The idea is to make both an Euler and a Verlet step for every time step to assess the error and adjust the step size accordingly. For every single step the step size is changed either up or down, depending on the error and the tolerance. As a result we always stay near the same error in every step, only taking as small steps as necessary in each iteration.

The non-adaptive algorithm is fixed in step size but varies in error per step. The adaptive algorithm varies in step size in an attempt to fix the error per step. For all simulations we have set $10^{-9}$ as the maximum tolerated error per step in the adaptive algorithm. For the non-adaptive algorithm we select a fixed step size, $10^{-6}$, to ensure that it is reasonably low most of the time. This approach was again taken directly from \cite{Saxe2015}.

\section{Heliocentric Restricted 4-Body System (H-R4B), Sun-Earth-Mars}

Simulating an orbit from Earth to Mars requires a new model with at least four bodies: Sun, Earth, Mars and the spacecraft. The coordinate system will be heliocentric and in spherical coordinates, and restricted\footnote{Meaning that the mass of the spacecraft is considered negligible compared to the celestial masses and therefore is assumed not to affect them}, so we call this system the ``Heliocentric Restricted 4-Body System'', see \cref{fig:solar-system-model}. We will derive the equations of motion of the spacecraft by using Hamilton's equations, which gives us a set of coupled first-order differential equations, and a set of conserved quantities as ``generalized momenta''.
\begin{figure}[ht]
\centering
\includegraphics[width=0.80\linewidth]{fig/solar-system-model}
\caption{H-R4B model of spacecraft motion. The Sun is assumed stationary at the origin of a spherical coordinate system, and lies at a focus of the two elliptical orbits of Earth and Mars. (Assets source: \cite{WikiSpherical,flaticon})}
\label{fig:solar-system-model}
\end{figure}

\subsection{Spherical Coordinate System}

We adopt the spherical coordinate system which is customary in astrodynamics, as shown in \cref{fig:solar-system-model}. The coordinate transform equations to and from the cartesian coordinate system are:
\begin{align}
x &= r \sin{\theta}\cos{\phi}, \label{eq:x(q)} \\
y &= r \sin{\theta}\sin{\phi}, \label{eq:y(q)}\\
z &= r \cos{\theta}, \label{eq:z(q)}
\end{align}
and
\begin{align}
r &= \sqrt{x^2 + y^2 + z^2}, \label{eq:r(x,y,z)}\\
\theta &= \arccos{\frac{z}{r}}, \label{eq:theta(x,y,z)}\\
% \arctan{\frac{y}{x}}, \qquad \theta \in [0, 2\pi]
\phi &= \arctantwo{(y,x)}, \label{eq:phi(x,y,z)}
\end{align}
where \(r \in [0, \infty)\), \(\theta \in [0, \pi]\) and \(\phi \in [0, 2\pi]\), and \(\arctan2\) denotes the two-argument version of the \(\arctan\) function\footnote{\(\arctan{(a)} \in (-\frac{\pi}{2}, \frac{\pi}{2})\) just takes a slope as input and gives an angle in quadrants 1 and 4 only, whereas \(\arctantwo{(y,x)} \in (-\pi, \pi]\) takes both \(x\) and \(y\), which also gives the correct quadrant (1--4).}\cite{WikiAtan2}. For the calculation of the kinetic energy in the next section we will need the position vector, the unit vectors in spherical coordinates, and the Jacobians. The position vector in spherical coordinates is
\begin{align}
\vec{r}(r, \theta, \phi) &= x \xhat + y \yhat + z \zhat \\
\Leftrightarrow \vec{r}(r, \theta, \phi) &= r \sin{\theta}\cos{\phi} \xhat + r \sin{\theta}\sin{\phi} \yhat + r \cos{\theta} \zhat.
\label{eq:position-vec-spherical}
\end{align}
The coordinate derivatives of the position vector are
\begin{align}
\pd{\vec{r}}{r} &= \sin{\theta}\cos{\phi} \xhat + \sin{\theta}\sin{\phi} \yhat + \cos{\theta} \zhat \quad &\text{and} \quad &\left| \pd{\vec{r}}{r} \right| = 1 \label{eq:position-derived-r} \\
\pd{\vec{r}}{\theta} &= r \cos{\theta}\cos{\phi} \xhat + r \cos{\theta}\sin{\phi} \yhat - r \sin{\theta} \zhat \quad &\text{and} \quad &\left| \pd{\vec{r}}{\theta} \right| = r \label{eq:position-derived-theta} \\
\pd{\vec{r}}{\phi} &= -r\sin{\theta}\sin{\phi} \xhat + r\sin{\theta}\cos{\phi} \yhat \quad &\text{and} \quad &\left| \pd{\vec{r}}{\phi} \right| = r\sin{\theta} \label{eq:position-derived-phi}
\end{align}
So the unit vectors in spherical coordinates are
\begin{align}
\rhat = \dfrac{\pd{\vec{r}}{r}}{\left| \pd{\vec{r}}{r} \right| } &= \sin{\theta}\cos{\phi} \xhat + \sin{\theta}\sin{\phi} \yhat + \cos{\theta} \zhat \label{eq:r-hat}\\
\thetahat = \dfrac{\pd{\vec{r}}{\theta}}{\left| \pd{\vec{r}}{\theta} \right| } &= \cos{\theta}\cos{\phi} \xhat + \cos{\theta}\sin{\phi} \yhat - \sin{\theta} \zhat \label{eq:theta-hat}\\
\phihat = \dfrac{\pd{\vec{r}}{\phi}}{\left| \pd{\vec{r}}{\phi} \right| } &= -\sin{\phi} \xhat + \cos{\phi} \yhat \label{eq:phi-hat}
\end{align}

\subsection{H-R4B Equations of Motion}

We will follow the 5-step process of arriving at Hamilton's equations outlined in \cref{apx:using-hamilton-mechanics}.

\subsubsection{Step 1: Lagrangian \(L\)}

The kinetic energy of the system is
\begin{align}
T = \frac{1}{2} m_s v^2.
\end{align}
In general, the total time derivative of the position vector is:
\begin{align}
\vec{v} &= \od{\vec{r}}{t} = \sum\limits_{j} \pd{\vec{r}}{q_j} \od{q_j}{t}, \\
&= \sum\limits_{j} \left|\pd{\vec{r}}{q_j}\right| \frac{\pd{\vec{r}}{q_j}}{\left|\pd{\vec{r}}{q_j}\right|} \od{q_j}{t}, \\
&= \sum\limits_{j} \left|\pd{\vec{r}}{q_j}\right| \od{q_j}{t} \unitvector{q}_j, \\
&= \sum\limits_{j} \left|\pd{\vec{r}}{q_j}\right| \dot{q_j} \unitvector{q}_j,
\end{align}
where we have used that a unit vector for any coordinate system is \(\frac{\pd{\vec{r}}{q_j}}{\left|\pd{\vec{r}}{q_j}\right|} = \unitvector{q}_j\). We can now use the derivatives of the position vector found earlier in \cref{eq:position-derived-r,eq:position-derived-theta,eq:position-derived-phi} to substitute for \(\left|\pd{\vec{r}}{q_j}\right|\). We don't have to substitute in \(\unitvector{q}_j\) since our unit vectors are orthonormal, so their dot products will yield either 0 or 1 when we square \(v\). So for the spherical coordinates we get:
\begin{align}
\vec{v} = \dot{r}\rhat + r\dot{\theta}\thetahat + r\sin{\theta}\,\dot{\phi}\phihat
\end{align}
so
\begin{align}
v^2 = \dot{r}^2 + r^2\dot{\theta}^2 + r^2\sin^2{\theta}\,\dot{\phi}^2
\end{align}
so we get
\begin{align}
T = \frac{1}{2} m_s (\dot{r}^2 + r^2\dot{\theta}^2 + r^2\sin^2{\theta}\,\dot{\phi}^2).
\end{align}
The gravitational potential is \cite{WikiGravPotential}
\begin{align}
V = -G m_s \sum\limits_{k} \frac{M_k}{\left| \vec{r} - \vec{r_k} \right|},
\end{align}
where \(k\) denotes the celestial bodies acting on the spacecraft, i.e. in our system the Sun, Earth and Mars. So we finally get the Lagrangian
\begin{align}
L &= T - V \\
\Leftrightarrow L &= \frac{1}{2} m_s (\dot{r}^2 + r^2\dot{\theta}^2 + r^2\sin^2{\theta}\,\dot{\phi}^2) + G m_s \sum\limits_{k} \frac{M_k}{\left| \vec{r} - \vec{r_k} \right|}
\end{align}

\subsubsection{Step 2: Generalized Momenta \(p_j\)}

\begin{align}
p_r &= \pd{L}{\dot{r}} = m_s \dot{r} \label{eq:pr} \\
p_\theta &= \pd{L}{\dot{\theta}} = m_s r^2 \dot{\theta} \label{eq:ptheta} \\
p_\phi &= \pd{L}{\dot{\phi}} = m_s r^2 \sin^2{\theta} \dot{\phi} \label{eq:pphi}
\end{align}
We recognize the three generalized momenta as linear momentum (\cref{eq:pr}) and angular momentum (\cref{eq:ptheta,eq:pphi}), respectively.
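As a quick consistency check, the generalized momenta in \cref{eq:pr,eq:ptheta,eq:pphi} can be reproduced symbolically. The short sketch below assumes the SymPy library and differentiates only the kinetic term, since the potential does not depend on the velocities.

\begin{verbatim}
import sympy as sp

m_s, r, theta = sp.symbols('m_s r theta', positive=True)
rdot, thetadot, phidot = sp.symbols('rdot thetadot phidot')

# kinetic energy in spherical coordinates
T = sp.Rational(1, 2) * m_s * (rdot**2 + r**2 * thetadot**2
                               + r**2 * sp.sin(theta)**2 * phidot**2)

# p_j = dL/d(qdot_j) = dT/d(qdot_j) since V is velocity independent
print(sp.diff(T, rdot))      # p_r     = m_s*rdot
print(sp.diff(T, thetadot))  # p_theta = m_s*r**2*thetadot
print(sp.diff(T, phidot))    # p_phi   = m_s*r**2*sin(theta)**2*phidot
\end{verbatim}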
\subsubsection{Step 3: \(\dot{q} = \dot{q}_j(\vec{q}, \vec{p}, t)\)}

\begin{align}
\dot{r} &= \frac{p_r}{m_s} \\
\dot{\theta} &= \frac{p_\theta}{m_s r^2} \\
\dot{\phi} &= \frac{p_\phi}{m_s r^2 \sin^2{\theta}}
\end{align}
We can now rewrite \(L\) to make it independent of \(\dot{q}\) by substituting the expressions above:
\begin{align}
T &= \frac{1}{2} m_s (\dot{r}^2 + r^2\dot{\theta}^2 + r^2\sin^2{\theta}\,\dot{\phi}^2) \\
&= \frac{1}{2} m_s \left(\frac{p_r^2}{m_s^2} + r^2\frac{p_\theta^2}{m_s^2 r^4} + r^2\sin^2{\theta}\frac{p_\phi^2}{m_s^2 r^4 \sin^4{\theta}} \right) \\
&= \frac{p_r^2}{2 m_s} + \frac{p_\theta^2}{2 m_s r^2} + \frac{p_\phi^2}{2 m_s r^2 \sin^2{\theta}}
\end{align}
So using \(L = T - V\) we have
\begin{align}
L = \frac{p_r^2}{2 m_s} + \frac{p_\theta^2}{2 m_s r^2} + \frac{p_\phi^2}{2 m_s r^2 \sin^2{\theta}} + G m_s \sum\limits_{k} \frac{M_k}{\left| \vec{r} - \vec{r_k} \right|}
\end{align}

\subsubsection{Step 4: Hamiltonian \(H\)}

\begin{align}
H(\vec{q}, \vec{p}, t) &= \sum\limits_{j}p_j \dot{q_j} - L \\
&= \sum\limits_{j}p_j \dot{q_j}(\vec{q}, \vec{p}, t) - L \\
&= p_r \frac{p_r}{m_s} + p_\theta \frac{p_\theta}{m_s r^2} + p_\phi \frac{p_\phi}{m_s r^2 \sin^2{\theta}} \notag \\
&- \left( \frac{p_r^2}{2 m_s} + \frac{p_\theta^2}{2 m_s r^2} + \frac{p_\phi^2}{2 m_s r^2 \sin^2{\theta}} + G m_s \sum\limits_{k} \frac{M_k}{\left| \vec{r} - \vec{r_k} \right|} \right) \\
\Leftrightarrow H &= \frac{p_r^2}{2 m_s} + \frac{p_\theta^2}{2 m_s r^2} + \frac{p_\phi^2}{2 m_s r^2 \sin^2{\theta}} - G m_s \sum\limits_{k} \frac{M_k}{\left| \vec{r} - \vec{r_k} \right|}
\end{align}
At this point we want to expand the expression \(\left| \vec{r} - \vec{r_k} \right|\) in anticipation of needing to differentiate \(H\) with respect to the coordinates. Using the equations for the \(x, y, z\) coordinates \cref{eq:x(q),eq:y(q),eq:z(q)} we get
\begin{align}
d_k = \left|\vec{r} - \vec{r}_k \right| &= \sqrt{(x-x_k)^2 + (y-y_k)^2 + (z-z_k)^2}
\end{align}
and (colors added to show which terms factor into each other in the next step)
\begin{align}
(x-x_k)^2 &= (r\sin{\theta}\cos{\phi} - r_k\sin{\theta_k}\cos{\phi_k})^2 \notag \\
&= \tikz[baseline]{ \node[fill=orange!20,anchor=base] {\(r^2\sin^2{\theta}\cos^2{\phi}\)} } + \tikz[baseline]{ \node[fill=red!20,anchor=base] {\(r_k^2\sin^2{\theta_k}\cos^2{\phi_k}\)} } - 2r r_k \sin{\theta}\sin{\theta_k}\cos{\phi}\cos{\phi_k} \label{eq:x-dist-squared} \\
(y-y_k)^2 &= (r\sin{\theta}\sin{\phi} - r_k\sin{\theta_k}\sin{\phi_k})^2 \notag \\
&= \tikz[baseline]{ \node[fill=orange!20,anchor=base] {\(r^2\sin^2{\theta}\sin^2{\phi}\)} } + \tikz[baseline]{ \node[fill=red!20,anchor=base] {\(r_k^2\sin^2{\theta_k}\sin^2{\phi_k}\)} } - 2r r_k \sin{\theta}\sin{\theta_k}\sin{\phi}\sin{\phi_k} \label{eq:y-dist-squared} \\
(z-z_k)^2 &= (r\cos{\theta} - r_k\cos{\theta_k})^2 \notag \\
&= \tikz[baseline]{ \node[fill=orange!20,anchor=base] {\(r^2\cos^2{\theta}\)} } + \tikz[baseline]{ \node[fill=red!20,anchor=base] {\(r_k^2\cos^2{\theta_k}\)} } - 2r r_k \cos{\theta}\cos{\theta_k}.
\label{eq:z-dist-squared} \end{align} Now adding all three \cref{eq:x-dist-squared,eq:y-dist-squared,eq:z-dist-squared} the orange terms factor into \(r^2\) and the red terms factor into \(r_k^2\) and we get \begin{align} d_k &= \sqrt{ \tikz[baseline]{\node[fill=orange!20,anchor=base]{\(r^2\)}} + \tikz[baseline]{\node[fill=red!20,anchor=base]{\(r_k^2\)}} -2r r_k(\cos{\theta}\cos{\theta_k}+\sin{\theta}\sin{\theta_k}( \tikz[baseline]{\node[fill=blue!20,anchor=base]{ \(\cos{\phi}\cos{\phi_k} + \sin{\phi}\sin{\phi_k}\)}} )) } \\ &= \sqrt{r^2 + r_k^2 - 2 r r_k(\cos{\theta}\cos{\theta_k}+\sin{\theta}\sin{\theta_k}\cos{(\phi - \phi_k}))}, \end{align} where the blue factor was simplified using the sum rule: \\ \(\cos{\alpha}\cos{\beta} + \sin{\alpha}\sin{\beta} = cos{(\alpha-\beta)}\) \cite{WeissteinTrig}. So we can finally express \(H\) as \begin{equation} \begin{aligned} H &= \frac{p_r^2}{2 m_s} + \frac{p_\theta^2}{2 m_s r^2} + \frac{p_\phi^2}{2 m_s r^2 \sin^2{\theta}} \\ &- G m_s \sum\limits_{k} \frac{M_k}{\sqrt{r^2 + r_k^2 - 2 r r_k \left[\cos{\theta}\cos{\theta_k}+\sin{\theta}\sin{\theta_k}\cos{(\phi - \phi_k})\right]}} \end{aligned} \end{equation} \subsubsection{Step 5: Hamilton's Equations} \begin{align} \dot{r} = \pd{H}{p_r} &= \frac{p_r}{m_s} \label{eq:rdot} \\[0.3cm] \dot{\theta} = \pd{H}{p_\theta} &= \frac{p_\theta}{m_s r^2} \label{eq:thetadot} \\[0.3cm] \dot{\phi} = \pd{H}{p_\phi} &= \frac{p_\phi}{m_s r^2 \sin^2{\theta}} \label{eq:phidot} \\[0.3cm] \begin{split} \dot{p}_r = -\pd{H}{r} &= \frac{p_\theta^2}{m_s r^3} + \frac{p_\phi^2}{m_s r^3 \sin^2{\theta} } \\ &+ G m_s \sum\limits_{k} M_k \frac{-r + r_k \left(\cos{\theta}\cos{\theta_k} + \sin{\theta}\sin{\theta_k}\cos{(\phi - \phi_k)}\right)}{\left[r^2 + r_k^2 - 2 r r_k \left(\cos{\theta}\cos{\theta_k} + \sin{\theta}\sin{\theta_k}\cos{(\phi - \phi_k)} \right) \right]^{3/2}} \label{eq:prdot} \end{split} \\[0.3cm] \begin{split} \dot{p}_\theta = -\pd{H}{\theta} &= \frac{p_\phi^2}{m_s r^2 \sin^2{\theta} \tan{\theta}} \\ &+ G m_s \sum\limits_{k} M_k \frac{r r_k \left[-\sin{\theta}\cos{\theta_k} + \cos{\theta}\sin{\theta_k}\cos{(\phi - \phi_k)} \right]}{\left[r^2 + r_k^2 - 2 r r_k \left(\cos{\theta}\cos{\theta_k} + \sin{\theta}\sin{\theta_k}\cos{(\phi - \phi_k)} \right) \right]^{3/2}} \label{eq:pthetadot} \end{split} \\[0.3cm] \begin{split} \dot{p}_\phi = -\pd{H}{\phi} &= G m_s \sum\limits_{k} M_k \frac{- r r_k \sin{\theta}\sin{\theta_k}\sin{(\phi - \phi_k)}}{\left[r^2 + r_k^2 - 2 r r_k \left(\cos{\theta}\cos{\theta_k} + \sin{\theta}\sin{\theta_k}\cos{(\phi - \phi_k)} \right) \right]^{3/2}} \label{eq:pphidot} \end{split} \end{align} Those are our equations of motion. However before we try to solve them, we will remove all units. \subsection{H-R4B Equations Nondimensionalized} We will now choose suitable characteristic units, which has a number of benefits: \begin{itemize} \item The equations gets slightly simplified \item The order of the effects of different forces in the system becomes more apparent. \item Many of the calculations in numerical algorithms happens at an order of about 1, which is desireable for decreasing round-off error due to finite machine precision. 
\end{itemize}
The characteristic units are chosen as:
\begin{align}
\text{Unit length: } k_r &= a_{\Earth}\ {\color{gray} \approx \SI{1.50e8}{\km}\ \text{(1 Earth orbit semi-major axis)}} \\[0.2cm]
\text{Unit time: } k_t &= T_{\Earth} = \frac{2\pi}{\omega_\Earth} = 2\pi \sqrt{\frac{a_\Earth^3}{G M_\Sun}}\ {\color{gray} \approx \SI{3.16e7}{\s}\ \text{ (1 year)}} \\[0.2cm]
\text{Unit speed: } k_v &= \frac{k_r}{k_t} = \frac{a_\Earth \omega_\Earth}{2\pi} = \frac{1}{2\pi} \sqrt{\frac{G M_\Sun}{a_\Earth}}\ {\color{gray} \approx \SI{4.74}{\km/\s}\ \text{(1 AU/y)}}
\end{align}
where \(a_\Earth\) is the semi-major axis of Earth's orbit in kilometers, see \cref{fig:earth-semi-major-axis}, \(T_\Earth\) is Earth's orbital period (i.e. 1 year) in seconds, and the characteristic speed thus becomes Earth's average orbital speed with respect to the Sun. We don't need a characteristic mass \(m_s\) since the mass cancels out in the equations of motion for the quantity we care about, delta-v. The unit for time is expressed in seconds because we found heuristically that we needed time steps on the order of \(\SI{1}{\s}\) to maintain an error per step of about \num{10e-9}. The unit for length is expressed in kilometers because it is customary to use \si{\km/\s} as the unit of speed for celestial objects.
\begin{figure}[ht]
\centering
\includegraphics[width=0.60\linewidth]{fig/earth-semi-major-axis}
\caption{Semi-major axis of Earth's elliptical orbit, used as the characteristic length of the system}
\label{fig:earth-semi-major-axis}
\end{figure}
\clearpage
The nondimensionalization is done in \cref{apx:hr4b-nondimensionalization}. The resulting equations and the nondimensionalized Hamiltonian are:
\begin{equation} \tag{2.67} \boxed{ \dot{R} = B_R } \end{equation}
\begin{equation} \tag{2.72} \boxed{ \dot{\theta} = \frac{B_\theta}{R^2} } \end{equation}
\begin{equation} \tag{2.76} \boxed{ \dot{\phi} = \frac{B_\phi}{R^2 \sin^2{\theta}} } \end{equation}
\begin{equation} \tag{2.86} \boxed{ \!\begin{aligned} \dot{B}_r = &\frac{B_\theta^2}{R^3} + \frac{B_\phi^2}{R^3 \sin^2{\theta}} \\ & + \sum\limits_{k} \eta_k \frac{-R + R_k \left(\cos{\theta}\cos{\theta_k} + \sin{\theta}\sin{\theta_k}\cos{(\phi - \phi_k)}\right)}{\left[R^2 + R_k^2 - 2 R R_k \left(\cos{\theta}\cos{\theta_k} + \sin{\theta}\sin{\theta_k}\cos{(\phi - \phi_k)} \right) \right]^{3/2}} \end{aligned} } \end{equation}
\begin{equation} \tag{2.87} \boxed{ \!\begin{aligned} \dot{B}_\theta = &\frac{B_\phi^2}{R^2 \sin^2{\theta} \tan{\theta}} \\ &+ \sum\limits_{k} \eta_k \frac{R R_k \left[-\sin{\theta}\cos{\theta_k} + \cos{\theta}\sin{\theta_k}\cos{(\phi - \phi_k)} \right]}{\left[R^2 + R_k^2 - 2 R R_k \left(\cos{\theta}\cos{\theta_k} + \sin{\theta}\sin{\theta_k}\cos{(\phi - \phi_k)} \right) \right]^{3/2}} \end{aligned} } \end{equation}
\begin{equation} \tag{2.88} \boxed{ \!\begin{aligned} \dot{B}_\phi = &\sum\limits_{k} \eta_k \frac{- R R_k \sin{\theta}\sin{\theta_k}\sin{(\phi - \phi_k)}}{\left[R^2 + R_k^2 - 2 R R_k \left(\cos{\theta}\cos{\theta_k} + \sin{\theta}\sin{\theta_k}\cos{(\phi - \phi_k)} \right) \right]^{3/2}} \end{aligned} } \end{equation}
\vspace{0.2cm}
where we renamed $p$ to $B$ because we divided by the spacecraft mass $m_s$ on both sides; thus $B_r$ is now the linear velocity along the $r$-coordinate, while $B_\theta$ and $B_\phi$ are the corresponding angular momenta per unit mass.
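As a quick sanity check of the characteristic units chosen above, the short sketch below (illustrative only, not part of the project code; it assumes standard textbook values for \(G\), \(M_\Sun\) and \(a_\Earth\) rather than the exact constants used elsewhere in this work) reproduces the quoted magnitudes of \(k_r\), \(k_t\) and \(k_v\):
\begin{verbatim}
# Illustrative check of the characteristic units (standard constant values assumed).
import math

G       = 6.674e-20   # gravitational constant [km^3 kg^-1 s^-2]
M_sun   = 1.989e30    # solar mass [kg]
a_earth = 1.496e8     # semi-major axis of Earth's orbit [km]

k_r = a_earth                                             # unit length [km]
k_t = 2 * math.pi * math.sqrt(a_earth**3 / (G * M_sun))   # unit time [s], ~3.16e7 s (1 year)
k_v = k_r / k_t                                           # unit speed [km/s], ~4.74 km/s

print(f"k_r = {k_r:.3e} km, k_t = {k_t:.3e} s, k_v = {k_v:.2f} km/s")
\end{verbatim}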
For reference, the nondimensionalized Hamiltonian per spacecraft mass is:
\begin{equation} \tag{2.105}
\begin{aligned}
\mathcal{H}_m &= \frac{B_r^2}{2} + \frac{B_\theta^2}{2 R^2} + \frac{B_\phi^2}{2 R^2 \sin^2{\theta}} \\
&- \sum\limits_{k} \eta_k \frac{1}{\sqrt{R^2 + R_k^2 - 2 R R_k \left[\cos{\theta}\cos{\theta_k}+\sin{\theta}\sin{\theta_k}\cos{(\phi - \phi_k)}\right]}}
\end{aligned}
\end{equation}
\subsection{H-R4B Numerical Integration Algorithm}
\subsubsection{Symplectic Euler for H-R4B System}
One can choose whichever of the two Symplectic Euler versions \cref{eq:symplectic-euler1,eq:symplectic-euler2} is easier to solve for the equations at hand. We choose the latter form \cref{eq:symplectic-euler2}, so with our Hamiltonian \cref{eq:HH_m} we get
\begin{align}
R_1 &= R_0 + h B_{r,0} \label{eq:symplectic-euler-R_1}, \\[0.4cm]
\theta_1 &= \theta_0 + h \frac{B_{\theta,0}}{R_1^2}, \label{eq:symplectic-euler-theta_1} \\[0.4cm]
\phi_1 &= \phi_0 + h \frac{B_{\phi,0}}{R_1^2 \sin^2{\theta_1}}, \label{eq:symplectic-euler-phi_1}
\end{align}
\begin{align}
\begin{aligned}
B_{r,1} = & B_{r,0} + h \left[ \frac{B_{\theta,0}^2}{R_1^3} + \frac{B_{\phi,0}^2}{R_1^3 \sin^2{\theta_1}} \right. \\
& \hspace{-21.0pt} + \left. \sum\limits_{k} \eta_k \frac{-R_1 + R_{k,1} \left(\cos{\theta_1}\cos{\theta_{k,1}} + \sin{\theta_1}\sin{\theta_{k,1}}\cos{(\phi_1 - \phi_{k,1})}\right)}{\left[R_1^2 + R_{k,1}^2 - 2 R_1 R_{k,1} \left(\cos{\theta_1}\cos{\theta_{k,1}} + \sin{\theta_1}\sin{\theta_{k,1}}\cos{(\phi_1 - \phi_{k,1})} \right) \right]^{3/2}} \right] , \label{eq:symplectic-euler-Br_1}
\end{aligned} \\[0.4cm]
\begin{aligned}
B_{\theta,1} = & B_{\theta,0} + h \left[ \frac{B_{\phi,0}^2}{R_1^2 \sin^2{\theta_1} \tan{\theta_1}} \right. \\
& \hspace{-21.0pt} + \left. \sum\limits_{k} \eta_k \frac{R_1 R_{k,1} \left[-\sin{\theta_1}\cos{\theta_{k,1}} + \cos{\theta_1}\sin{\theta_{k,1}}\cos{(\phi_1 - \phi_{k,1})} \right]}{\left[R_1^2 + R_{k,1}^2 - 2 R_1 R_{k,1} \left(\cos{\theta_1}\cos{\theta_{k,1}} + \sin{\theta_1}\sin{\theta_{k,1}}\cos{(\phi_1 - \phi_{k,1})} \right) \right]^{3/2}} \right], \label{eq:symplectic-euler-Btheta_1}
\end{aligned} \\[0.4cm]
\begin{aligned}
B_{\phi,1} = & B_{\phi,0} + h \left[ \vphantom{\frac12} \right.\\
& \hspace{-21.0pt} \hphantom{+} \left. \sum\limits_{k} \eta_k \frac{- R_1 R_{k,1} \sin{\theta_1}\sin{\theta_{k,1}}\sin{(\phi_1 - \phi_{k,1})}}{\left[R_1^2 + R_{k,1}^2 - 2 R_1 R_{k,1} \left(\cos{\theta_1}\cos{\theta_{k,1}} + \sin{\theta_1}\sin{\theta_{k,1}}\cos{(\phi_1 - \phi_{k,1})} \right) \right]^{3/2}} \right]. \label{eq:symplectic-euler-Bphi_1}
\end{aligned}
\end{align}
As we can see, we are lucky with this Hamiltonian, since we can simply run all steps in the order above, \cref{eq:symplectic-euler-R_1,eq:symplectic-euler-theta_1,eq:symplectic-euler-phi_1,eq:symplectic-euler-Br_1,eq:symplectic-euler-Btheta_1,eq:symplectic-euler-Bphi_1}, without needing to solve any implicit equations.
\subsubsection{Symplectic Verlet for H-R4B System}
These equations of motion follow the procedure of \cref{eq:symplectic-verlet2}. This algorithm was derived and implemented, but not thoroughly tested. In the interest of time and space, we refer to \cref{apx:symplectic-verlet-derivations} for the derivation and equations.
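To make the explicit update order of the Symplectic Euler scheme above concrete, here is a minimal sketch in Python (illustrative only, not the project implementation; the function and argument names are assumed, and \texttt{bodies} is assumed to hold the mass parameter \(\eta_k\) and the spherical position of each body at the new time step):
\begin{verbatim}
# Illustrative sketch of one Symplectic Euler step for the H-R4B system.
from math import sin, cos, tan

def symplectic_euler_step(R, theta, phi, Br, Bt, Bp, bodies, h):
    # Positions are advanced first, using only the old momentum-like variables.
    R1     = R + h * Br
    theta1 = theta + h * Bt / R1**2
    phi1   = phi + h * Bp / (R1**2 * sin(theta1)**2)

    # Momentum-like variables are then advanced using the new positions.
    dBr = Bt**2 / R1**3 + Bp**2 / (R1**3 * sin(theta1)**2)
    dBt = Bp**2 / (R1**2 * sin(theta1)**2 * tan(theta1))
    dBp = 0.0
    for eta_k, Rk, thk, phk in bodies:   # body states at the new time
        cosg  = cos(theta1)*cos(thk) + sin(theta1)*sin(thk)*cos(phi1 - phk)
        denom = (R1**2 + Rk**2 - 2*R1*Rk*cosg) ** 1.5
        dBr  += eta_k * (-R1 + Rk*cosg) / denom
        dBt  += eta_k * R1*Rk*(-sin(theta1)*cos(thk)
                               + cos(theta1)*sin(thk)*cos(phi1 - phk)) / denom
        dBp  += eta_k * (-R1*Rk*sin(theta1)*sin(thk)*sin(phi1 - phk)) / denom

    return R1, theta1, phi1, Br + h*dBr, Bt + h*dBt, Bp + h*dBp
\end{verbatim}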
\section{Interplanetary Transfer Orbits}
\subsection{Transfer Orbit to Mars: 2D Patched Conic Approximation}
\label{sec:2d-patched-conic}
To get a sense of the initial conditions that will bring us onto a transfer orbit from Earth to Mars, we are interested in the following key numbers:
\begin{enumerate}
\item Delta-v for Mars Orbit Insertion (MOI) at Earth.
\item Delta-v for arrival into circular orbit at Mars.
\item Relative positions of Earth and Mars for the optimal Hohmann trajectory.
\item Travel time for the optimal Hohmann trajectory.
\end{enumerate}
Knowing these numbers, we have a good first guess for the initial conditions to put into the interplanetary 3D simulator, around which we can then search for optimal parameters for good low-delta-v trajectories to Mars. To approximate these numbers we can use the \emph{patched conic approximation}.
First we recall that there are four possible orbit types in a two-body system, which are all conic sections: circular, elliptic, parabolic and hyperbolic (the circular orbit actually being a special case of the elliptic), see \cref{fig:conics-and-orbits}.
\begin{figure}[ht]
\centering
\subfloat[Conic sections. The parabola is created from an intersecting plane parallel to the side of the cone (Image: \cite{MagisterMathematicae} (modified)).]{
\includegraphics[width=0.47\linewidth]{fig/Conic_Sections2.pdf}
\label{fig:conic-sections}
}
\hfill
\subfloat[Examples of the four orbit types of a two-body system and their eccentricities (Image: \cite{Seahen} (modified)).]{
\includegraphics[width=0.40\linewidth]{fig/Eccentricity.pdf}
\label{fig:orbit-types}
}
\caption{The four types of conic sections, corresponding to the four kinds of orbits for a two-body gravitational system. Some authors count three types, with the circle being a special case of the ellipse.}
\label{fig:conics-and-orbits}
\end{figure}
These four orbit types are characterized by the orbital specific energy (mechanical energy per unit mass) or, equivalently, by ranges of eccentricity; see the table in \cref{tab:orbit-type-properties}. As the table shows, a negative mechanical energy corresponds to a closed orbit (either circular or elliptical). Zero energy corresponds to a parabolic orbit, i.e. an escape-velocity orbit that occurs when a satellite is moving with \emph{just} enough speed with respect to a central body that its speed goes towards zero at infinity. Finally, positive mechanical energy corresponds to hyperbolic orbits.
\begin{table}[tbp]
\centering
\begin{tabular}{@{}llll@{}}
\toprule
Conic Section & Eccentricity & Semi-major axis & Energy \\
\midrule
Circle & $0$ & = radius & $<0$ \\
Ellipse & $0 < e < 1$ & $>0$ & $<0$ \\
Parabola & 1 & infinity & $0$ \\
Hyperbola & $>1$ & $<0$ & $>0$ \\
\bottomrule
\end{tabular}
\caption{Properties of the four orbit types, characterized by their specific energy and eccentricity (Source: \cite{Braeunig}).}
\label{tab:orbit-type-properties}
\end{table}
For an interplanetary transfer orbit we can use the Hohmann transfer orbit as the ``base transfer orbit'' and make some modifications. We recall that the Hohmann transfer is an elliptical orbit that brings a satellite between two different circular orbits around a central body by applying two instantaneous burns, as depicted back in \cref{fig:hohmann}. This assumes only one central body. However, in our system we have three central bodies of interest: Earth, Sun and Mars. Each is the dominant body at various points of the journey.
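Before patching the three conics together it is useful to check the magnitudes on the heliocentric leg. The sketch below (illustrative only; it assumes standard values for the Sun's gravitational parameter and circular, coplanar orbits for Earth and Mars, which is exactly the 2D approximation made here) evaluates the vis-viva equation on the Hohmann ellipse:
\begin{verbatim}
# Illustrative check of the heliocentric Hohmann leg (standard values assumed).
import math

mu_sun  = 1.327e11    # gravitational parameter of the Sun [km^3/s^2]
r_earth = 1.496e8     # radius of Earth's orbit [km], assumed circular
r_mars  = 2.279e8     # radius of Mars' orbit [km], assumed circular

a_transfer = 0.5 * (r_earth + r_mars)   # semi-major axis of the Hohmann ellipse

def vis_viva(r, a):
    """Orbital speed at radius r on an orbit with semi-major axis a."""
    return math.sqrt(mu_sun * (2.0 / r - 1.0 / a))

v_earth = vis_viva(r_earth, r_earth)       # ~29.8 km/s (Earth's orbital speed)
v_mars  = vis_viva(r_mars, r_mars)         # ~24.1 km/s (Mars' orbital speed)
v_dep   = vis_viva(r_earth, a_transfer)    # ~32.7 km/s at perihelion (departure)
v_arr   = vis_viva(r_mars, a_transfer)     # ~21.5 km/s at aphelion (arrival)

print(f"excess speed needed at Earth: {v_dep - v_earth:.2f} km/s")   # ~2.9
print(f"speed deficit at Mars:        {v_mars - v_arr:.2f} km/s")    # ~2.7
\end{verbatim}
These are the heliocentric speeds only; the delta-v figures quoted below additionally account for the hyperbolic departure from low Earth orbit and the hyperbolic arrival at Mars.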
We start in a circular orbit around Earth, burn into an elliptical orbit around the Sun, and arrive at Mars, burning into a circular orbit again. We can model the transfer orbit to Mars by patching three conic sections:
\begin{enumerate}
\item Hyperbolic departure orbit (Geocentric reference frame).
\item Elliptical transfer orbit (Heliocentric reference frame).
\item Hyperbolic arrival orbit (Mars-centric reference frame).
\end{enumerate}
\cref{fig:Hohmann-to-mars-heliocentric} shows the necessary speeds on the Hohmann orbit and the orbital speeds of Earth and Mars.
\begin{figure}[ht]
\centering
\includegraphics[width=0.7\linewidth]{fig/Hohmann-to-mars-heliocentric.png}
\caption{Hohmann transfer to Mars with necessary departure speed (periapsis at Earth) and arrival speed (apoapsis at Mars) on the Hohmann orbit relative to the Sun (Image: \cite[p.~127]{Rapp2016} (modified)).}
\label{fig:Hohmann-to-mars-heliocentric}
\end{figure}
Why are the departure and arrival orbits modelled as hyperbolic? We need extra speed from LEO to enter a transfer orbit that intersects Mars, meaning we need some velocity in excess of the escape velocity. Likewise, we \emph{always} arrive at other bodies in a hyperbolic orbit as seen from that body: if we started at rest infinitely far away and fell for an infinite time, we would arrive at the planet with exactly the escape velocity (this is just escape at exactly the escape velocity, in reverse). But since in practice we always approach the target body (here Mars) with some relative speed already, we arrive with a speed in excess of the escape velocity, hence along a hyperbolic orbit, as shown in the table in \cref{tab:orbit-type-properties}.
As \cref{fig:Hohmann-to-mars-heliocentric} shows, we would arrive at Mars' orbit with less speed than Mars \emph{if Mars wasn't there}; in reality we need to slow down, because the attraction of Mars accelerates us to a speed higher than necessary for a close circular orbit at the target altitude of \SI{125}{\km}.
Thus overall we get a hyperbolic departure orbit, patched with an elliptical orbit, patched with another hyperbolic arrival orbit, with required speeds as illustrated in \cref{fig:Hohmann-to-mars-geocentric-areocentric}.
\begin{figure}[ht]
\centering
\includegraphics[width=0.7\linewidth]{fig/Hohmann-to-mars-geocentric-areocentric.png}
\caption{Hohmann transfer to Mars with necessary departure, arrival and parking speeds relative to Earth on the left and Mars on the right (Image: \cite[p.~133]{Rapp2016} (modified)).}
\label{fig:Hohmann-to-mars-geocentric-areocentric}
\end{figure}
All the numbers of \cref{fig:Hohmann-to-mars-heliocentric,fig:Hohmann-to-mars-geocentric-areocentric} are derived in \cref{apx:mars-hohmann-derivations}, answering our four questions from the beginning of this section:
\begin{enumerate}
\item \SI{3.62}{\km\per\s} needed to depart a circular Earth orbit at \SI{160}{\km} altitude at Mars Orbit Insertion (MOI).
\item \SI{-2.11}{\km\per\s} needed at Mars arrival for a circular orbit at \SI{125}{\km} altitude. Just for illustration, we could also apply a smaller burn at Mars at the expense of entering a highly elliptical orbit, as \cref{fig:mars-arrival-orbit} shows.
\item Mars must be $44^\degree$ ahead of Earth at MOI in order for Mars to be at the Hohmann orbit apoapsis at the same time as the spacecraft, see \cref{fig:Hohmann-angle}. This happens about every 26 months.
\item Travel time for the optimal Hohmann trajectory is around 260 days, or around 8.5 months.
\end{enumerate}
These numbers are an important approximate reference point to have when attempting to find low-energy transfer orbits to Mars.
\begin{figure}[ht]
\centering
\includegraphics[width=0.35\linewidth]{fig/Hohmann-angle.png}
\caption{The angle that Mars travels during the Mars Hohmann transfer orbit can easily be calculated as roughly \(136^\degree\), meaning the optimal launch is when Mars is \(44^\degree\) ahead of Earth in orbit (Image: \cite{Stern}).}
\label{fig:Hohmann-angle}
\end{figure}
\begin{figure}[ht]
\centering
\includegraphics[width=0.85\linewidth]{fig/mars-arrival-orbit.png}
\caption{Various closed orbits upon arrival depending on applied $\Delta v$ (Image: \cite[p.~137]{Rapp2016} (modified)).}
\label{fig:mars-arrival-orbit}
\end{figure}
\clearpage
\subsection{Earth-Mars Transfer in 3D}
\subsubsection{Ignored Complications}
In the section above there are a number of complications that we have ignored. For example, Earth and Mars have different orbital eccentricities (0.0167 vs. 0.0934), meaning that their angular velocities change considerably depending on whether they are near perihelion or aphelion at the time of transfer. For a better estimate we can no longer consider the orbits to be circular. \cite{Braeuniga} gives a great analysis of the Earth-Mars Hohmann transfer orbit in 3D.
\subsubsection{Launch into The Right Plane – Actual Hohmann Transfer Orbit in 3D}
In the previous section we detailed the important patched conic sections model in 2D. In our 3D simulator we will attempt something similar, but take the inclination of Mars' orbital plane into account. Mars' orbital inclination is $1.85^\degree$ with respect to the ecliptic plane. Therefore, if one naively tried to rendezvous with Mars in the ecliptic plane, one could miss it by up to \( 1.52\ \text{AU} \cdot 1.85^\degree \cdot \pi / 180 \approx \SI{7.4e6}{\km} \). Instead we will attempt a 3D Hohmann transfer orbit as illustrated in \cref{fig:hohmann-transfer-orbit-3D}, using the same launch parameters as were found in the 2D Hohmann model in \cref{sec:2d-patched-conic}, but allowing the \(\phi\) angle to vary a bit to get slightly out of the plane.
\begin{figure}[ht]
\centering
\includegraphics[width=0.80\linewidth]{fig/hohmann-transfer-orbit-3D.png}
\caption{Hohmann orbit in a 3D model needs a small $\Delta v$ in the $\phi$ direction to get out of the plane in such a way that it will be in the orbital plane at Mars at apoapsis (Image: \cite{Daedalis.de}).}
\label{fig:hohmann-transfer-orbit-3D}
\end{figure}
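The phase angle, travel time and out-of-plane offset quoted above are easy to verify with a short back-of-the-envelope script (illustrative only; standard values are assumed for the orbital radii and the Sun's gravitational parameter):
\begin{verbatim}
# Illustrative check of the Hohmann travel time, phase angle and plane offset.
import math

mu_sun  = 1.327e11    # gravitational parameter of the Sun [km^3/s^2]
AU      = 1.496e8     # astronomical unit [km]
r_earth = 1.00 * AU   # radius of Earth's orbit [km], assumed circular
r_mars  = 1.52 * AU   # radius of Mars' orbit [km], assumed circular

a_t        = 0.5 * (r_earth + r_mars)                    # Hohmann semi-major axis
t_transfer = math.pi * math.sqrt(a_t**3 / mu_sun)        # half an orbital period
T_mars     = 2 * math.pi * math.sqrt(r_mars**3 / mu_sun)

mars_sweep = 360.0 * t_transfer / T_mars   # angle Mars travels during transfer, ~136 deg
phase      = 180.0 - mars_sweep            # Mars must lead Earth by ~44 deg at launch

offset = 1.52 * AU * math.radians(1.85)    # worst-case out-of-plane distance, ~7.4e6 km

print(f"transfer time: {t_transfer/86400:.0f} days")   # ~259
print(f"phase angle:   {phase:.0f} deg")               # ~44
print(f"plane offset:  {offset:.2e} km")               # ~7.3e6
\end{verbatim}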
\xname{datalog_analysis}
\chapter{Datalog Analysis}
\label{chap:datalog-analysis}
A common way to rapidly prototype program analyses in Chord is using a declarative logic-programming language called Datalog. This chapter describes all aspects of Datalog analysis in Chord. Section \ref{sec:writing-datalog-analysis} explains how to write a Datalog analysis, Section \ref{sec:running-datalog-analysis} explains how to run it, and Section \ref{sec:tuning-datalog-analysis} explains how to tune its performance. Finally, Section \ref{sec:bdd} explains the representation of BDDs (Binary Decision Diagrams), which are a data structure central to executing Datalog analyses.
\section{Writing a Datalog Analysis}
\label{sec:writing-datalog-analysis}
A Datalog analysis declares a set of input/output program relations, each over one or more program domains, and provides a set of rules (constraints) specifying how to compute the output relations from the input relations. An example of such an analysis is shown in Figure \ref{fig:datalog-analysis}.
\begin{figure}
{\small
\begin{verbatim}
# name=datarace-dlog

# Program domains
.include "E.dom"
.include "F.dom"
.include "T.dom"

# BDD variable order
.bddvarorder E0xE1_T0_T1_F0

# Input/intermediate/output program relations
field(e:E0,f:F0) input
write(e:E0) input
reach(t:T0,e:E0) input
alias(e1:E0,e2:E1) input
escape(e:E0) input
unguarded(t1:T0,e1:E0,t2:T1,e2:E1) input
hasWrite(e1:E0,e2:E1)
candidate(e1:E0,e2:E1)
datarace(t1:T0,e1:E0, t2:T1,e2:E1) output

# Analysis constraints
hasWrite(e1,e2) :- write(e1).
hasWrite(e1,e2) :- write(e2).
candidate(e1,e2) :- field(e1,f), field(e2,f), hasWrite(e1,e2), e1 <= e2.
datarace(t1,e1,t2,e2) :- candidate(e1,e2), reach(t1,e1), reach(t2,e2), \
    alias(e1,e2), escape(e1), escape(e2), unguarded(t1,e1,t2,e2).
\end{verbatim}
}
\caption{An Example Datalog Analysis.}
\label{fig:datalog-analysis}
\end{figure}
Any line that begins with a {\tt \#} is regarded as a comment, except a line of the form ``{\tt \#} {\tt name=<...>}'', which specifies the name {\tt <...>} of the Datalog analysis. Each Datalog analysis is expected to have exactly one such line. The above Datalog analysis is named {\tt datarace-dlog}.
In Chord, all Datalog analysis names have suffix {\tt -dlog} and all Java analysis names have suffix {\tt -java}, but this is merely a convention that analysis writers are free to deviate from. The name of each analysis, written in Datalog or in Java, is expected to be unique across all analyses in scope (i.e., across all analyses in paths specified by properties \code{chord.dlog.analysis.path} and \code{chord.java.analysis.path}).
The {\tt .include "<...>.dom"} lines specify each program domain named {\tt <...>} that is needed by the Datalog analysis, i.e., each domain over which any program relation that is input or output by the Datalog analysis is defined.
The declaration of each such relation specifies the domain of each of the relation's attributes. If the same domain appears in multiple attributes of a relation then contiguous integers starting from 0 must be used to distinguish them; for instance, in the above example, {\tt candidate} is a binary relation, both of whose attributes have domain E, and they are distinguished as E0 and E1.
Each relation is represented symbolically (as opposed to explicitly) using a graph-based data structure called a Binary Decision Diagram (BDD for short). Each domain containing N elements is assigned log2(N) BDD variables.
The size of a BDD and the efficiency of operations on it depend heavily on the order of these BDD variables. The {\tt .bddvarorder <...>} line in the Datalog analysis enables the Datalog analysis writer to specify this order. It must list all domains along with their numerical suffixes, separated by {\tt \_} or {\tt x}. Using a {\tt \_} between two domains, such as {\tt T0\_T1}, means that the BDD variables assigned to domain {\tt T0} precede those assigned to domain {\tt T1} in the BDD variable order for this Datalog analysis. Using an {\tt x} between two domains, such as {\tt E0xE1}, means that the BDD variables assigned to domains {\tt E0} and {\tt E1} will be interleaved in the BDD variable order for this Datalog analysis. See Section \ref{sec:bdd} for more details on BDD representations.
Each Datalog analysis rule is a Horn clause of the form {\tt R(t) :- R1(t1), ..., Rn(tn)} meaning that if relations {\tt R1}, ..., {\tt Rn} contain tuples {\tt t1}, ..., {\tt tn} respectively, then relation {\tt R} contains tuple {\tt t}. A backslash may be used at the end of a line to break long rules for readability. The Datalog analysis solver bddbddb used in Chord does not apply any sophisticated optimizations to simplify the rules; besides the BDD variable order, the manner in which these rules are expressed heavily affects the performance of the solver. For instance, an important manual optimization involves breaking down long rules into multiple shorter rules communicating via intermediate relations. See Section \ref{sec:tuning-datalog-analysis} for hints on tuning the performance of Datalog analyses.
\section{Running a Datalog Analysis}
\label{sec:running-datalog-analysis}
Chord interprets each file with extension {\tt .dlog} or {\tt .datalog} in the path specified by property \code{chord.dlog.analysis.path} as a program analysis expressed in Datalog.
{\bf Under Construction}
\section{Tuning a Datalog Analysis}
\label{sec:tuning-datalog-analysis}
There are several tricks analysis writers can try to improve the performance of bddbddb, the Datalog solver used by Chord, often by several orders of magnitude. Try these tricks by running the following command:
\begin{verbatim}
prompt> ant -Ddlog.file=<file> -Dwork.dir=<dir> solve
\end{verbatim}
where {\tt <file>} denotes the file defining the Datalog analysis to be tuned, and {\tt <dir>} is the directory containing the program domains ({\tt *.dom} files) and program relations ({\tt *.bdd} files) consumed by the analysis (this is by default the \code{chord_output/bddbddb/} directory generated by a previous run of Chord).
\begin{enumerate}
\item
Set properties \verb+noisy=yes+, \verb+tracesolve=yes+, and \verb+fulltracesolve=yes+ on the above command line and observe which rule gets ``stuck'' (i.e., takes several seconds to solve). \verb+fulltracesolve+ is seldom useful, but \verb+noisy+ and \verb+tracesolve+ are often very useful. Once you identify the rule that is getting stuck, the output will also tell you which relations and domains are used in that rule, and which operation on them is taking a long time to solve. Then try to fix the problem with that rule by doing either or both of the following:
\begin{itemize}
\item
Break down the rule into multiple rules by creating intermediate relations (the more relations you have on the RHS of a rule, the longer it generally takes to solve that rule).
\item
Change the relative order of the domains of those relations in the BDD variable order (note that you can use either `\_' or `x' between a pair of domains).
\end{itemize}
\item
Once you have ensured that none of the rules is getting ``stuck'', you will notice that some rules are applied too many times, and so although each application of the rule itself isn't taking too much time, the cumulative time for the rule is too much. After finishing solving a Datalog analysis, bddbddb prints how long each rule took to solve (both in terms of the number of times it was applied and the cumulative time it took). It sorts the rules in the order of the cumulative time. You need to focus on the rules that took the most time to solve (they will be at the bottom of the list). Assuming you removed the problem of rules getting ``stuck'', the rules will roughly be in the order of the number of times they were applied. Here is an example:
\begin{verbatim}
OUT> Rule VH(u:V0,h:H0) :- VV(u:V0,v:V1), VH(v:V1,h:H0), VHfilter (u:V0,h:H0).
OUT> Updates: 2871
OUT> Time: 6798 ms
OUT> Longest Iteration: 0 (0 ms)
OUT> Rule IM(i:I0,m:M0) :- reachableI(i:I0), specIMV(i:I0,m:M0,v:V0), VH(v:V0,_:H0).
OUT> Updates: 5031
OUT> Time: 6972 ms
OUT> Longest Iteration: 0 (0 ms)
\end{verbatim}
Notice that the second rule was applied 5031 times whereas the first was applied 2871 times. More importantly, the second rule took 6972 milliseconds in all, compared to 6798 for the first rule. Hence, you should focus on the second rule first, and try to speed it up. This means that you should focus only on relations IM, reachableI, specIMV, and VH, and the domains I0, M0, V0, and H0. Any changes you make that do not affect these relations and domains are unlikely to make your solving faster. In general, look at the last few rules, not just the last one, and try to identify the ``sub-analysis'' of the Datalog analysis that seems problematic, and then focus on speeding up just that sub-analysis.
\item
You can add the \verb+.split+ keyword at the end of certain rules as a hint to bddbddb to decompose those rules into simpler ones that can be solved faster. You can also set property \verb+split_all_rules=yes+ as shorthand for splitting all rules without adding the \verb+.split+ keyword to any of them, though I seldom find splitting all rules helpful.
\item
You can try to decompose a single Datalog analysis file into two separate Datalog analysis files. Of course, you cannot separate mutually-recursive rules into two different analyses, but if you unnecessarily club together rules that could have gone into different analyses, then they can put conflicting demands on bddbddb (e.g., on the BDD variable order). So if rule 2 uses the result of rule 1 and rule 1 does not use the result of rule 2, then put rule 1 and rule 2 in separate Datalog analyses.
\item
Observe the sizes of the BDDs representing the relations that are input and output. bddbddb prints both the number of tuples in each relation and the number of nodes in the BDD. Try changing the BDD variable order for the domains of the relation, and observe how the number of nodes in the BDD for that relation changes. You will notice that some orders perform remarkably better than others. Then note down these orders as invariants that you will not violate as you tweak other things.
\item
The relative order of values \emph{within} domains (e.g., in domains named \verb+M+, \verb+H+, \verb+C+, etc. in Chord) affects the performance of bddbddb, but I've never tried changing this and studying its effect. It might be worth trying.
For instance, John Whaley's PLDI'04 paper describes a specific way in which he numbers contexts (in domain \verb+C+) and that it was fundamental to the speedup of his ``infinity"-CFA points-to analysis. \item Finally, it is worth emphasizing that BDDs are not magic. If your algorithm itself is fundamentally hard to scale, then BDDs are unlikely to help you a whole lot. Secondly, many things are awkward to encode as integers (e.g., the abstract contexts in domain \verb+C+ in Chord) or as Datalog rules. For instance, I've noticed that summary-based context-sensitive program analyses are hard to express in Datalog. The may-happen-in-parallel analysis provided in Chord shows a relatively simple kind of summary-based analysis that uses the Reps-Horwitz-Sagiv tabulation algorithm. But this is as far as I could get---more complicated summary-based algorithms are best written in Java itself instead of Datalog. \end{enumerate} \section{BDD Representation} \label{sec:bdd} Each domain containing N elements is assigned log2(N) BDD variables in the underlying BDD factory with contiguous IDs. For instance, domain {\tt F0} containing [128..256) elements will be assigned 8 variables with IDs (say) 63,64,65,66,67,68,69,70 and domain {\tt Z0} containing [8..16) elements will be assigned 4 variables with IDs (say) 105,106,107,108. If two domains are uninterleaved in the declared domain order in a Datalog program (i.e., {\tt \_} is used instead of {\tt x} between them), then the BDD variables assigned to those domains are ordered in reverse order in the underlying BDD factory. For instance, the BDD variable order corresponding to the declared domain order {\tt F0\_Z0} is (in level2var format) ``70,69,68,67,66,65,64,63,108,107,106,105". If two domains are interleaved in the declared domain order in a Datalog program (i.e., {\tt x} is used instead of {\tt \_} between them), then the BDD variables assigned to those domains are still ordered in reverse order of IDs in the underlying BDD factory, but they are also interleaved. For instance, the BDD variable order corresponding to the declared domain order {\tt F0xZ0} is (in level2var format) ``70,108,69,107,68,106,67,105,66,65,64,63". Each BDD variable is at a unique ``level" which is its 0-based position in the BDD variable order in the underlying BDD factory. We will next illustrate the format of a BDD stored on disk (in a .bdd file) using the following example: \begin{verbatim} # V0:16 H0:12 # 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 # 82 83 84 85 86 87 88 89 90 91 92 93 28489 134 39 36 33 30 27 24 21 18 15 12 9 6 3 0 81 79 77 75 73 71 69 67 65 63 61 59 57 55 \ 53 51 82 80 78 76 74 72 70 68 66 64 62 60 58 56 54 52 37 34 31 28 25 22 19 16 \ 13 10 7 4 1 117 116 115 114 113 112 111 110 109 108 107 106 50 49 48 47 46 45 \ 44 43 42 41 40 105 104 103 102 101 100 99 98 97 96 95 94 93 92 91 90 89 88 87 \ 86 85 84 83 133 132 131 130 129 128 127 126 125 124 123 122 121 120 119 118 \ 38 35 32 29 26 23 20 17 14 11 8 5 2 287 83 0 1 349123 84 287 0 349138 85 0 349123 ... \end{verbatim} The first comment line indicates the domains in the relation (in the above case, {\tt V0} and {\tt H0}, represented using 16 and 12 unique BDD variables, respectively). If there are $N$ domains, there are $N$ following comment lines, each specifying the BDD variables assigned to the corresponding domain. The following line has two numbers: the number of nodes in the represented BDD (28489 in this case) and the number of variables in the BDD factory from which the BDD was dumped to disk. 
Note that the number of variables (134 in this case) is not necessarily the number of variables in the represented BDD (16+12=28 in this case), though it is guaranteed to be greater than or equal to it.
The following line specifies the BDD variable order in var2level format. In this case, the specified order subsumes {\tt V0\_H0} (notice that levels ``81 79 77 75 73 71 69 67 65 63 61 59 57 55 53 51'', which are at positions ``14 15 ... 28 29'' in the specified order, are lower than levels ``105 104 103 102 101 100 99 98 97 96 95 94'', which are at positions ``82 83 .. 92 93'').
Each of the following lines specifies a unique node in the represented BDD; it has format ``X V L H'' where:
\begin{itemize}
\item X is the ID of the BDD node
\item V is the ID of the BDD variable labeling that node (unless it is 0 or 1, in which case it represents a leaf node)
\item L is the ID of the BDD node's low child
\item H is the ID of the BDD node's high child
\end{itemize}
The order of these lines specifying BDD nodes is such that the lines specifying the BDD nodes in the L and H entries of each BDD node are guaranteed to occur before the line specifying that BDD node (for instance, the L entry 287 on the second line and the H entry 349123 on the third line are IDs of BDD nodes specified on the first and second lines, respectively).
Note on Terminology: The {\it support} of a BDD {\tt b} is another BDD {\tt r} = {\tt b.support()} that represents all the variables used in {\tt b}. The support BDD {\tt r} is a linear tree, each of whose nodes contains a separate variable; the low branch of each node is 0, and the high branch is the node representing the next variable. To list all the variables used in a BDD {\tt b} use {\tt r.scanSet()}. Needless to say, {\tt scanSet()} simply walks along the high branches starting at the root of BDD {\tt r}.
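For illustration, the node lines of such a dump could be read back with a small Python script like the one below (this is only a sketch, not part of Chord or bddbddb; it assumes one node per line, as described above, and relies on the guarantee that the children of a node are listed before the node itself):
\begin{verbatim}
# Sketch: read a dumped .bdd file into {node id: (var id, low id, high id)}.
def parse_bdd_file(path):
    with open(path) as f:
        # Join "\"-continued physical lines into logical lines.
        logical, buf = [], ''
        for raw in f:
            raw = raw.strip()
            if raw.endswith('\\'):
                buf += raw[:-1] + ' '
            else:
                logical.append(buf + raw)
                buf = ''
    # Drop comment lines; the first remaining line is "<num nodes> <num vars>",
    # the second is the BDD variable order, and the rest are "X V L H" lines.
    data = [l for l in logical if l and not l.startswith('#')]
    nodes = {}
    for line in data[2:]:
        x, v, lo, hi = (int(tok) for tok in line.split())
        nodes[x] = (v, lo, hi)   # lo/hi are 0, 1, or ids seen on earlier lines
    return nodes
\end{verbatim}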
\chapter{Abstract} \lipsum[1-2]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%% RAVEN INTERFACE (RAVEN running RAVEN) %%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{RAVEN Interface}
\label{subsec:RAVENInterface}
The RAVEN interface provides the possibility to execute a RAVEN input file that drives a set of SLAVE RAVEN calculations. For example, if the user wants to optimize the parameters of a surrogate model (e.g. minimizing the distance between the surrogate predictions and the real data), they can achieve this by setting up a RAVEN input file (master) that performs an optimization on the feature space characterized by the surrogate model parameters, whose training and validation assessment is performed in the SLAVE RAVEN runs.
\\ There are some limitations for this interface:
\begin{itemize}
  \item only one sub-level of RAVEN can be executed (i.e. if the SLAVE RAVEN input file contains the run of another SLAVE RAVEN, the MASTER RAVEN will error out)
  \item only data from Outstreams of type Print can be collected by the MASTER RAVEN
  \item only a maximum of two Outstreams can be collected (1 PointSet and 1 HistorySet)
\end{itemize}
As with every other interface, most of the RAVEN workflow stays the same independently of which type of Model (i.e. Code) is used.
\\ Similarly to any other code interface, the user provides paths to executables and aliases for sampled variables within the \xmlNode{Models} block. The \xmlNode{Code} block will contain attributes \xmlAttr{name} and \xmlAttr{subType}. \xmlAttr{name} identifies that particular \xmlNode{Code} model within RAVEN, and \xmlAttr{subType} specifies which code interface the model will use (in this case \xmlAttr{subType}=``RAVEN''). The \xmlNode{executable} block should contain the absolute or relative (with respect to the current working directory) path to the RAVEN framework script (\textbf{raven\_framework}).
\\ In addition to the attributes and XML nodes reported above, the RAVEN interface accepts the following XML nodes (required and optional):
\begin{itemize}
  % nodes for code
  \item \xmlNode{outputDatabase}, \xmlDesc{string, required parameter} will specify the \xmlNode{Database} that will be loaded as the output of the INNER RAVEN. If this node is not specified, \xmlNode{outputExportOutStreams} may be used instead.
  \item \xmlNode{outputExportOutStreams}, \xmlDesc{comma separated list, required parameter} will specify the \xmlNode{OutStreams} that will be loaded as the outputs of the SLAVE RAVEN. A maximum of two \xmlNode{OutStreams} can be listed here (1 for PointSet and/or 1 for HistorySet).
  \item \xmlNode{conversion}, \xmlDesc{Node, optional parameter} will specify details of the conversion scripts to be used in creating the inner RAVEN input file. This node contains the following nodes:
  \begin{itemize}
    % nodes for conversion
    \item \xmlNode{module}, \xmlDesc{Node, optional parameter} a module for directly manipulating the XML structure of perturbed input files. This can be used to modify the template input file in arbitrary ways; however, it should be used with caution, and is considered an advanced method. This node has the following attribute:
    \begin{itemize}
      % attributes for module
      \item \xmlAttr{source}, \xmlDesc{string, required} provides the path to the manipulation module including the module file itself. The following method should be defined in order to perform the input manipulation:
      \begin{itemize}
        % functions for module
        \item \textbf{\textit{modifyInput}}, manipulates the input in arbitrary ways. This method takes two arguments.
The first is the root \xmlNode{Simulation} node of the template input file that has already been modified with the perturbed samples (the object is a Python \texttt{xml.etree.ElementTree.Element} object). The second input is a dictionary with all the modification information used to previously modify the template XML. The method should return the modified root. Example:
\begin{lstlisting}[language=python]
import xml.etree.ElementTree as ET

def modifyInput(root, modDict):
  """
    Manipulate the inner RAVEN xml input.
    @ In, root, ET.Element, perturbed RAVEN input
    @ In, modDict, dictionary, modifications made to the input
    @ Out, root, ET.Element, modified RAVEN input
  """
  # adds the file <Input name='aux_inp'>auxfile.txt</Input> to the <Files> node
  filesNode = root.find('Files')
  newNode = ET.Element('Input')
  newNode.text = 'auxfile.txt'
  newNode.attrib['name'] = 'aux_inp'
  filesNode.append(newNode)
  return root
\end{lstlisting}
      \end{itemize} % end functions for module
    \end{itemize} % end attributes for module
    \item \xmlNode{module}, \xmlDesc{Node, optional parameter} contains the information about a specific conversion module (Python file). This node can be repeated multiple times. This node has the following attribute:
    \begin{itemize}
      % attributes for module
      \item \xmlAttr{source}, \xmlDesc{string, required} provides the path to the conversion module including the module file itself. There are two methods that can be placed in the conversion module:
      \begin{itemize}
        % functions for module
        \item \textbf{\textit{manipulateScalarSampledVariables}}, a method aimed at manipulating the sampled variables and creating new ones if needed. Example:
\begin{lstlisting}[language=python]
def manipulateScalarSampledVariables(sampledVariables):
  """
    This method is aimed to manipulate scalar variables.
    The user can create new variables based on the variables sampled by RAVEN
    @ In, sampledVariables, dict, dictionary of sampled variables
      ({"var1":value1,"var2":value2})
    @ Out, None, the new variables should be added in the "sampledVariables" dictionary
  """
  newVariableValue = sampledVariables['Distributions|Uniform@name:a_dist|lowerBound'] + 1.0
  sampledVariables['Distributions|Uniform@name:a_dist|upperBound'] = newVariableValue
  return
\end{lstlisting}
        \item \textbf{\textit{convertNotScalarSampledVariables}}, a method aimed at converting non-scalar variables (e.g., 1D arrays) into multiple scalar variables (e.g. \xmlNode{constant}(s) in a sampling strategy). This method is required in case non-scalar variables are detected by the interface. Example:
\begin{lstlisting}[language=python]
def convertNotScalarSampledVariables(noScalarVariables):
  """
    This method is aimed to convert not scalar variables into multiple scalar variables.
    The user MUST create new variables based on the not Scalar Variables
    sampled (and passed in) by RAVEN
    @ In, noScalarVariables, dict, dictionary of sampled variables that are
      not scalar ({"var1":1Darray1,"var2":1Darray2})
    @ Out, newVars, dict, the new variables that have been created based on
      the not scalar variables contained in "noScalarVariables" dictionary
  """
  oneDimensionalArray = noScalarVariables['temperatureHistory']
  newVars = {}
  for cnt, value in enumerate(oneDimensionalArray):
    newVars['Samplers|MonteCarlo@name:myMC|constant'+
            '@name=temperatureHistory'+str(cnt)] = oneDimensionalArray[cnt]
  return newVars
\end{lstlisting}
      \end{itemize} % end functions for module
    \end{itemize} % end attributes for module
    The \xmlNode{module} node also takes the following node:
    \begin{itemize}
      % nodes of module
      \item \xmlNode{variables}, \xmlDesc{comma-separated list, required} provides a comma-separated list of the variables from the MASTER RAVEN that need to be accessed by the conversion script module. The variables listed here use the pipe naming system (un-aliased names).
    \end{itemize} % end nodes of module
  \end{itemize} % end nodes for conversion
\end{itemize} % end nodes for code
Code input example:
\begin{lstlisting}[style=XML]
<Code name="RAVENrunningRAVEN" subType="RAVEN">
  <executable>../../../raven_framework</executable>
  <outputExportOutStreams>
    HistorySetOutStream,PointSetOutStream
  </outputExportOutStreams>
  <conversion>
    <module source="/Users/username/whateverConversionModule.py">
      <variables>a,b,x,y</variables>
    </module>
  </conversion>
</Code>
\end{lstlisting}
As with every other interface, the syntax of the variable names is important to make the parser understand how to perturb an input file.
\\ For the RAVEN interface, a syntax inspired by the XPath nomenclature is used.
\begin{lstlisting}[style=XML]
<Samplers>
  <MonteCarlo name="MC_external">
    ...
    <variable name="Models|ROM@subType:SciKitLearn@name:ROM1|C">
      <distribution>C_distrib</distribution>
    </variable>
    <variable name="Models|ROM@subType:SciKitLearn@name:ROM1|tol">
      <distribution>toll_distrib</distribution>
    </variable>
    <variable name="Samplers|Grid@name:GridName|variable@name:var1|grid@construction:equal@type:value@steps">
      <distribution>categorical_step_distrib</distribution>
    </variable>
    ...
  </MonteCarlo>
</Samplers>
\end{lstlisting}
In the above example, it can be inferred that each XML node (or subnode) needs to be separated by a ``|'' separator. In addition, every time an XML node has attributes, the user can use the ``@'' separator to specify a value for them.
The first variable above will be pointing to the following XML sub-node (\xmlNode{C}):
\begin{lstlisting}[style=XML]
<Models>
  <ROM name="ROM1" subType="SciKitLearn">
    ...
    <C>10.0</C>
    ...
  </ROM>
</Models>
\end{lstlisting}
The second variable above will be pointing to the following XML sub-node (\xmlNode{tol}):
\begin{lstlisting}[style=XML]
<Models>
  <ROM name="ROM1" subType="SciKitLearn">
    ...
    <tol>0.0001</tol>
    ...
  </ROM>
</Models>
\end{lstlisting}
The third variable above will be pointing to the following XML attribute (\xmlAttr{steps}):
\begin{lstlisting}[style=XML]
<Samplers>
  <Grid name="GridName">
    ...
    <variable name="var1">
      ...
      <grid construction="equal" type="value" steps="1">0 1</grid>
      ...
    </variable>
    ...
  </Grid>
</Samplers>
\end{lstlisting}
The above nomenclature must be used for all the variables to be sampled and for the variables generated by the two methods that may be contained in the module specified by the \xmlNode{module} node of the \xmlNode{conversion} block in the \xmlNode{Code} section.
\\ Finally, the SLAVE RAVEN input file(s) must be ``tagged'' with the attribute \xmlAttr{type="raven"} in the Files section. For example,
\begin{lstlisting}[style=XML]
<Files>
  <Input name="slaveRavenInputFile" type="raven" >
    test_rom_trainer.xml
  </Input>
</Files>
\end{lstlisting}
\subsubsection{ExternalXML and RAVEN interface}
Care must be taken if the SLAVE RAVEN uses \xmlNode{ExternalXML} nodes. In this case, each file containing external XML nodes must be added in the \xmlNode{Step} as an \xmlNode{Input} class \xmlAttr{Files} to make sure it gets copied to the individual run directory. The type for these files can be anything, with the exception of type \xmlString{raven}.
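To summarize the conversion capabilities described earlier in this subsection, both conversion hooks can live in the same module file referenced by a single \xmlNode{module} node. The following is only an illustrative sketch (the variable names, e.g. the \texttt{a\_dist} bounds and \texttt{temperatureHistory}, are hypothetical and must be consistent with the \xmlNode{variables} list of that \xmlNode{module} node):
\begin{lstlisting}[language=python]
# Illustrative conversion module combining both documented hooks in one file.

def manipulateScalarSampledVariables(sampledVariables):
  """
    Derive the upper bound of a_dist from its sampled lower bound.
    @ In, sampledVariables, dict, sampled scalar variables
    @ Out, None, new entries are added to sampledVariables in place
  """
  low = sampledVariables['Distributions|Uniform@name:a_dist|lowerBound']
  sampledVariables['Distributions|Uniform@name:a_dist|upperBound'] = low + 1.0

def convertNotScalarSampledVariables(noScalarVariables):
  """
    Expand a sampled 1D array into one constant per entry.
    @ In, noScalarVariables, dict, non-scalar sampled variables
    @ Out, newVars, dict, scalar variables created from the array entries
  """
  newVars = {}
  for cnt, value in enumerate(noScalarVariables['temperatureHistory']):
    newVars['Samplers|MonteCarlo@name:myMC|constant'
            '@name=temperatureHistory' + str(cnt)] = value
  return newVars
\end{lstlisting}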
\section{Features}
% This section contains a list of requirement statements. This should be of the
% form "the system shall..." and worded in a way that satisfaction of the
% requirement can be verified. For example, the software shall authenticate
% users with at least two identification factors.
The software shall produce a graphical user interface that allows the user to interact with the imported data.
% [For Each Feature]
\subsection{Import Files}
\subsubsection{Description}
% Describe the feature and how it fits into the overall product.
We will be able to parse data from user-selected .xls, .xlsx, and .csv files and load the data into a SQLite database that will be used to create reports. The graphical user interface is what the user will see after they import their data. It will be in addition to our main feature of creating reports from the imported data.
\subsubsection{Priority}
% Describe the relative importance of this feature.
If we cannot parse data from the file types Kitsap Transit is providing, we will need to reassess how we will import data. Importing data from files is an important feature that is a relatively high priority. This feature is important to have since it is what the customer requested, but it is not the most important feature of the software because there are other ways to display the collected data.
\subsubsection{Stimulus and Response}
% What event will trigger the feature and how should the system respond. This
% is probably an excerpt of a use case.
Reading a file will occur when the user selects the option to import data. The system will then parse the data and store it in a SQLite database. The user will provide their data as input to our software. Then our software will create reports with that data. After that, the graphical user interface will be created so that the user can access and interact with the data.
\subsubsection{Functional Requirements}
% Formally state the functional requirement.
% The low-level format command shall require authorization by two
% system-administrators before beginning the low-level format operation.
The program shall parse data from files input by the user, determine the nature of the data, and store the data in a SQLite database.
\subsection{Report Creation}
\subsubsection{Description}
% Describe the feature and how it fits into the overall product.
Using the data imported into the program, the user will be able to select report customization options and create reports in an Excel file format. The main function of this product is to compile transit usage data into reports.
\subsubsection{Priority}
% Describe the relative importance of this feature.
Report creation is a high priority. This feature is the primary purpose of this software. \textit{Kitsap Transit Reports} must be able to accurately match the quality of past reports.
\subsubsection{Stimulus and Response}
% What event will trigger the feature and how should the system respond. This
% is probably an excerpt of a use case.
The user will select a reporting range (e.g., 1 month), along with the options for what to include within a report. When the user selects the option to compile a new report, the system will use the reporting options provided by the user to query the software's database and select the data which will be written to a new Excel file.
\subsubsection{Functional Requirements}
% Formally state the functional requirement.
% The low-level format command shall require authorization by two
% system-administrators before beginning the low-level format operation.
Editing the reporting range shall set the reporting range for report creation.\\
Checking a reporting option shall enable the reporting option for report creation.\\
Clicking the button to create a new report shall use the reporting options to query the system's database for the data needed for the report.\\
The data queried from the database shall be written to a new Excel file which matches the specifications of the user.\\
The system shall open the newly created Excel file.
\subsection{Data Visualization}
\subsubsection{Description}
% Describe the feature and how it fits into the overall product.
The user will be able to view visualizations of the data that they have imported into the program. This will be useful to Kitsap Transit in analyzing the success of their routes and stops.
\subsubsection{Priority}
% Describe the relative importance of this feature.
This feature is low priority compared to all other features. This feature is not a requirement for Kitsap Transit. The software can function without this feature, but the user experience would be improved with data visualization included.
\subsubsection{Stimulus and Response}
% What event will trigger the feature and how should the system respond. This
% is probably an excerpt of a use case.
When the user selects a category of data visualization and the necessary parameters for said visualization, the system will query the software's database and compile the results into a format which can be represented by a chart or graph.
\subsubsection{Functional Requirements}
% Formally state the functional requirement.
% The low-level format command shall require authorization by two
% system-administrators before beginning the low-level format operation.
Selecting a data visualization/comparison option shall call a system function which queries the system's database for the relevant data.\\
The queried data shall be formatted such that it can be displayed by a chart or graph within the software's graphical user interface.\\
The graphical user interface shall be updated to display the chart or graph specified by the user.
\subsection{Update Route Information}
\subsubsection{Description}
% Describe the feature and how it fits into the overall product.
In order to import data into the system's database, as well as create reports and visualizations, the system needs to have up-to-date information about current bus routes (e.g., the route IDs for each route, the region associated with each route, and what stops are contained in each route). This feature will allow the user to update the route and stop information included in our system.
\subsubsection{Priority}
% Describe the relative importance of this feature.
This is an important part of the software that is necessary for nearly all aspects of the system. It is heavily tied to the functionality of file data importing.
\subsubsection{Stimulus and Response}
% What event will trigger the feature and how should the system respond. This
% is probably an excerpt of a use case.
When Kitsap Transit changes their bus routes, they will select an option to edit information on the current bus routes. The system will display a list of the current routes, as well as the stops associated with each route. The user can add a new route or stop, or update a pre-existing route or stop. When the user submits updated route information, the system will update its database to match the user's specifications.
\subsubsection{Functional Requirements}
% Formally state the functional requirement.
% The low-level format command shall require authorization by two
% system-administrators before beginning the low-level format operation.
Selecting the option to update route information shall query the system's database for all route information.\\
The system shall display route information to the user.\\
The system shall allow the user to update route information from the graphical user interface or select an option to add a new route. \\
Clicking an option to save changes shall update the system's database to reflect the user's changes.
\subsection{Graphical User Interface}
\subsubsection{Description}
% Describe the feature and how it fits into the overall product.
The graphical user interface is what the user will both see and interact with when using this software. It will aid users in the process of creating custom reports from the data they import into the program.
\subsubsection{Priority}
% Describe the relative importance of this feature.
Although there are other ways to display information to the user and get user input, a graphical user interface is relatively important to have since it will make creating reports easier for users and it is what the customer requested.
\subsubsection{Stimulus and Response}
% What event will trigger the feature and how should the system respond. This
% is probably an excerpt of a use case.
The graphical user interface will launch immediately when the user starts up the program. Buttons and text input within the graphical user interface will call functions within the system that will handle the user's requests. When the system is ready to display results to the user, the graphical user interface will update to show the user relevant information.
\subsubsection{Functional Requirements}
% Formally state the functional requirement.
{ "alphanum_fraction": 0.8039153773, "avg_line_length": 66.9084507042, "ext": "tex", "hexsha": "a3822424e11cfebf2e162bf69d0e5279c4cbe284", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "9de689e5a1868963c0fc7058415dac0b884eb592", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Zach076/KitsapTransitReports", "max_forks_repo_path": "docs/software_requirements/features.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "9de689e5a1868963c0fc7058415dac0b884eb592", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Zach076/KitsapTransitReports", "max_issues_repo_path": "docs/software_requirements/features.tex", "max_line_length": 420, "max_stars_count": null, "max_stars_repo_head_hexsha": "9de689e5a1868963c0fc7058415dac0b884eb592", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Zach076/KitsapTransitReports", "max_stars_repo_path": "docs/software_requirements/features.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1879, "size": 9501 }
%% SECTION HEADER /////////////////////////////////////////////////////////////////////////////////////
\section{Example: Equations}
\label{sec12}

%% SECTION CONTENT ////////////////////////////////////////////////////////////////////////////////////
This section shows a few equation examples. Labels can be used to reference equations.

\begin{itemize}
	\item Example (Equation \ref{eq:1_1})
	\item Example (Equation \ref{eq:1_2})
	\item Example (Equation \ref{eq:1_3})
	\item Example (Equation \ref{eq:1_4})
	\item Example (Equation \ref{eq:1_5})
\end{itemize}

\begin{equation}
	\label{eq:1_1}
	\overline{M}_{i}=\iint\limits_A \rho\;u_{i}\left(u_{k}\;n_{k} \right)\;dA
\end{equation}

\begin{equation}
	\label{eq:1_2}
	V_{REF} = \left( \frac{2 \times 144 \times P_{DYN}}{\rho} \right)^{\frac{1}{2}}
\end{equation}

\begin{equation}
	\label{eq:1_3}
	V_{MOM} = \left( \frac{A_{TIP} (\frac{V_{REF}}{2})^{2} + A_{MIDDLE} V_{REF}^{2} + A_{HUB} V_{HUB}^{2}}{A_{JET} V_{AVG}} \right)
\end{equation}

\begin{equation}
	\label{eq:1_4}
	\begin{aligned}
		& Q_{J}= \int_0^\delta Vdy+\left(y-\delta \right)b\;V_{s}\\
		& Q_{J}= b\;V_{s} \int_0^\delta \left(\frac{y}{\delta} \right)^\frac{1}{n}dy+\left(y-\delta \right)b\;V_{s}\\
		& Q_{J}=\frac{b\;V_{s}}{\left(\frac{n+1}{n} \right)}\delta+\left(y-\delta \right)b\;V_{s}\\
		& Q_{J}=b\;V_{s}\left(y-\frac{\delta}{n+1} \right)
	\end{aligned}
\end{equation}

\begin{equation}
	\label{eq:1_5}
	B_{K_{T_{Jx}}}=\sqrt{\left(\theta_{Q_{J}}B_{Q_{J}} \right)^{2}+\left(\theta_{D}B_{D} \right)^{2}+\left(\theta_{n}B_{n} \right)^{2}+\left(\theta_{\alpha}B_{\alpha} \right)^{2}+\left(\theta_{\rho}\left(B_{\rho}+\theta_{\rho tw}B_{tw} \right) \right)}
\end{equation}
{ "alphanum_fraction": 0.5591459896, "avg_line_length": 38.5111111111, "ext": "tex", "hexsha": "201c52728467c77996b5c3c420dd31b536841530", "lang": "TeX", "max_forks_count": 4, "max_forks_repo_forks_event_max_datetime": "2020-09-22T10:10:01.000Z", "max_forks_repo_forks_event_min_datetime": "2015-09-11T05:12:18.000Z", "max_forks_repo_head_hexsha": "9fb0ad6d5e6d94531c34778a66127e5913a3830c", "max_forks_repo_licenses": [ "RSA-MD" ], "max_forks_repo_name": "IFFM-PAS-MISD/aidd", "max_forks_repo_path": "reports/project_reports/Ijjeh_thesis_template/Chapters/Intro/sect12.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "9fb0ad6d5e6d94531c34778a66127e5913a3830c", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "RSA-MD" ], "max_issues_repo_name": "IFFM-PAS-MISD/aidd", "max_issues_repo_path": "reports/project_reports/Ijjeh_thesis_template/Chapters/Intro/sect12.tex", "max_line_length": 259, "max_stars_count": 3, "max_stars_repo_head_hexsha": "9e8255d5406211b07253fca29788a3557860edc0", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "SeaShadow/LaTeX-AMC-PhD-Thesis-Template", "max_stars_repo_path": "Chapters/Chapter1/sect12.tex", "max_stars_repo_stars_event_max_datetime": "2022-01-16T10:40:13.000Z", "max_stars_repo_stars_event_min_datetime": "2018-09-05T01:29:35.000Z", "num_tokens": 666, "size": 1733 }